antimony/CMakeLists.txt

cmake_minimum_required(VERSION 3.1)
project(Antimony)

set(CMAKE_BUILD_TYPE RELEASE)

set(CMAKE_CXX_FLAGS "-Wall -Wextra -g -Werror=switch")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DRELEASE")
set(CMAKE_CXX_FLAGS_DEBUG "-O0")

################################################################################

find_package(PythonLibs 3.3 REQUIRED)

if (WIN32)
    add_definitions("-DBOOST_PYTHON_STATIC_LIB")
endif()

if (WIN32)
    find_package(Boost REQUIRED COMPONENTS python3)
elseif (APPLE)
    foreach (PYTHON_NAME python3 python36 python37)
        find_package(Boost QUIET COMPONENTS ${PYTHON_NAME})
        if (${Boost_FOUND})
            break()
        endif()
    endforeach()
elseif (UNIX)
    foreach (PYTHON_NAME python3 python-py36 python-py35 python-py34)
        find_package(Boost QUIET COMPONENTS ${PYTHON_NAME})
        if (${Boost_FOUND})
            break()
        endif()
    endforeach()
endif()

if (NOT ${Boost_FOUND})
    message(FATAL_ERROR "Could not find boost::python3")
endif()

################################################################################

add_subdirectory(lib/graph)
add_subdirectory(lib/fab)
add_subdirectory(app)

antimony/doc/imgs/sub_inside.png
[binary PNG data omitted: screenshot of a subgraph's inputs and outputs as floating datums, 1652x456]
antimony/doc/imgs/sub_outside.png
[binary PNG data omitted: screenshot of a subgraph node in its parent graph, 610x418]

antimony/doc/release-notes/0.9.0.md

Antimony 0.9.0
--------------

### News:

This release has a new f-rep parser and a new graph engine!  Not much has
changed on the user-facing side of things, but the internals are much spiffier
(and will be easier to build on in the future).

--------------------------------------------------------------------------------

### New parser

The f-rep parser has been rewritten by @fros1y to use
[lemon](http://www.hwaci.com/sw/lemon/) and [flex](http://flex.sourceforge.net/)
instead of a homebrew solution.

In this rewrite, a second way of expressing math was added: infix notation
(a.k.a. the way that most people write math).

For example, instead of writing `-+qXqYf1`, you can write
`=pow(X, 2) + pow(Y, 2) - 1;`

The leading `=` and trailing `;` indicate to the parser that this is an infix
string.

For examples of this new syntax, check out the
[cylinder wrap](https://github.com/mkeeter/antimony/blob/develop/py/fab/shapes.py#L804-L824)
function.

--------------------------------------------------------------------------------

### New graph engine

The graph engine has been rewritten from the ground up as a stand-alone library
(though its design is optimized for Antimony's use-case).  It no longer uses Qt
signals and slots (or Qt at all!), preferring the STL and C++11.

There's a slick re-evaluation optimization: when an upstream object changes,
the engine properly sequences downstream update calls to minimize the number of
times scripts and datums get re-evaluated.  In practice, I've seen a 5x speedup
on very large graphs.

--------------------------------------------------------------------------------

### Breaking changes:

Some of the changes made here are potentially breaking.  There's a transparent
migration path from older files; feel free to report bugs if it fails for you.

#### Namespaces

These changes will be find-and-replaced when older files are loaded, so things
should Just Work in most cases.

- `fab.ui` namespace has been renamed `sb.ui`
- `meta` object (which contains export functions) has been renamed `sb.export`
- `fab.color` object has been renamed `sb.color`

#### Graph changes:

It's no longer legal to connect an output datum to an input in the same script.
Previously, this was legal (but a bad idea).

Datums of type `str` must have their expressions quoted.  Previously, there was
a magic check to auto-quote them if evaluation failed.

--------------------------------------------------------------------------------

### Other features:

- `Return` key acts like tab when editing datums in an inspector.
- Unary operators (rotate, scale, etc) preserve shape color.
- More information is displayed in viewport (thanks, @joshuashort)
- User data directory is searched for nodes (thanks, @astronouth7303)
- Gradients are calculated exactly (improves rendering quality).
- `right_triangle` now supports negative widths and heights
- Keyboard arrows (up and down) adjust datum values in inspectors.
- `arctan2` operator is now available in f-reps
- Fun new nodes: volume-filling gyroids!

### Bugfixes:

- `.sb` extension is now added on Linux.
- UTF-8 is used regardless of locale settings.

### Thanks

Thanks to the following Github users for their contributions, large and small:

- @fros1y
- @joshuashort
- @astronouth7303
- @toomuchcookies
- @neilger
- @defnull

antimony/doc/release-notes/0.9.2.md

Antimony 0.9.2
--------------

This release has relatively few user-facing features, but should make the
lives of our Linux packaging volunteers easier.

There are also a host of bugfixes in `sb.ui` (which contains a set of
immediate-mode UI calls that can be placed in `Node` scripts).

## Features

- Building now uses `cmake` instead of `qmake`, which is much simpler
- `sb.ui` functions now take an optional `key` argument to disambiguate

### Bugfixes:

- Dragging `sb.ui` shapes was not undoable; now, it is.
- Correct positions are passed into controls with absolute drag callbacks
- Fixed a bug where you could end up with unparented control instances

antimony/doc/release-notes/0.8.0.md

Antimony 0.8.0
--------------

### News:

Antimony got unintentionally announced to the internet by a post on Hackaday;
it then spent a few hours at the top of Hacker News.  There's been a
corresponding surge of interest (and Github issues), and we've already seen a
few contributions from the community -- props to
[Michael](https://github.com/elbowdonkey) for tracking down a mysterious GPU
issue and [Joshua](https://github.com/joshuashort) for making right-click open
the "Add node" menu.

I've created a
[Google group](https://groups.google.com/forum/#!forum/antimony-dev) for
people interested in contributing.

### Features:

- Standardized shape and nodes libraries
    - Moved functions from nodes to `shapes.py` to make them accessible
    - Cleaned up duplicate nodes
    - Added UI handles to many nodes that were missing them
- Make selection less silly
    - Previously, when a node defined multiple controls, they could be
      selected individually.
    - Now, they're all selected simultaneously.
- Right-click now opens up add menu ([@joshuashort](https://github.com/joshuashort)).
- Added `--heightmap` option to start up with heightmap rendering.
- Made `fab.types.Shape` constructor accept a single string argument
- Fixed building on a variety of Linux systems
- Made syntax highlighter fancier:
    - It now properly highlights multi-line strings!
- Add orthographic view button.

### Bugfixes:

- Adding a Control when "Hide UI" is selected now works properly
- Snapping to wireframe objects (in viewport) now works
- Removed multisampling to fix crashes on Yosemite with integrated GPUs
  (hotfix to 0.7.9, thanks to [@elbowdonkey](https://github.com/elbowdonkey)).

antimony/doc/release-notes/0.7.7.md

Antimony 0.7.7
--------------

**Features:**

- Allow `fab.types.Shape` input text box to be edited.
    - This required a file format change.
    - Older files will be automatically upgraded.
- Warn when unsaved changes will be lost.
- Drag functions can now be specified for `fab.ui.wireframe` objects.
- `Set Color` node allows shapes to be rendered in custom colors.
- Add button that shows hidden datums (i.e. datums prefixed with `_`).
- Tooltips for buttons in inspector.
- Remove `Alt` as the *Hide UI* shortcut and added View menu option.

**Bugfixes:**

- Close script editor when datum is deleted.
- Fixed subtle bug in `Control` construction.

antimony/doc/release-notes/0.7.0.md

Antimony 0.7.0
--------------

This is the second major Antimony rewrite.

The first rewrite happened over the summer of 2014 and was a switch from
Python + PySide to C++.  The current rewrite happened at the end of 2014.
It preserves the core of the application but dramatically changes the user
interface:

- Instead of graphs being super-imposed onto the 3D view, they are in a
  separate 2D pane.
- Arbitrarily many windows can be opened onto the scene (including quad views).
- Script editing is done in a separate, dedicated window.

There are also a few fundamental features that have been added:

- Undo and redo
- Copy and paste

Finally, the application has been updated to use Qt 5.4 (which has a better
OpenGL API).

antimony/doc/release-notes/0.7.9.md

Antimony 0.7.9
--------------

**Features:**

- Dramatic speedup in mesh export
- Added experimental feature detection to mesh export (which finds edges and
  corners and pushes a vertex to keep them sharp)
- Another major speedup in loading large files
- Made export workflow more flexible:
    - In the graph view, create a node from the `Export` node category
    - Attach shapes to its inputs
    - Click the small arrow in the upper right of the node to run the export
      task
- Added 'Jump to node' on right-click (in either 3D or graph views)
- Improved documentation

**Bugfixes:**

- Fixed a few memory leaks of Python objects
- Cancelling an export with Escape now actually stops the export task
- Changed the way that images are drawn to prevent (?) `QGraphicsScene` crashes
- Removed multisampling to fix crashes on Yosemite with integrated GPUs.

antimony/doc/release-notes/0.9.1.md

Antimony 0.9.1
--------------

### News:

This release adds support for nested graphs!

The UI is also rewritten, pretty much from scratch.  It implements the same
functionality in fewer lines of code and should be more robust in keeping the
graph and UI synched.

## Nested Graphs

You can now add a subgraph by selecting `Graph` from the `Add` menu.
In its parent graph, a subgraph looks just like a script node:

![Subgraph from parent](/doc/imgs/sub_outside.png?raw=true)

The three icons open up:

- The subgraph (as an editable graph)
- A single 3D viewport showing the subgraph
- A set of four 3D viewports showing the subgraph

Within the subgraph, you can add Inputs and Outputs from the `Add` menu.
These inputs and outputs are strongly typed and show up as floating Datums:

![Subgraph from parent](/doc/imgs/sub_inside.png?raw=true)

--------------------------------------------------------------------------------

### Other features:

- Unified undo / redo framework (should be more robust)
- Drag / drop targets are now larger (thanks, @awgrover)
- Local datum lookup with `self` (thanks, @awgrover)

### Bugfixes:

- Trying to export at resolution 0 now prints an error message
- Fixed an edge case where undoing a connection didn't restore original text
- Fixed transparent windows on Wayland (thanks, @ApostolosB)
- Parsing long expressions doesn't fail
- Fixed `QGestureManager` crash (thanks, Qt 5.5)

antimony/doc/release-notes/0.7.2.md

Antimony 0.7.2
--------------

**Features:**

- When mousing over a wireframe control in a 3D view, the corresponding
  inspector is highlighted (and vice versa).
- Spacebar is now used instead of shift to snap connections while dragging
  (to work around a Qt bug in Linux).
- Fail with a nice message if `fab` folder is not found
- Adding polar iteration node.
- Releases are now built as `.dmg`
- README and USAGE now have `.txt` extension in deployed bundle

**Bugfixes:**

- Undo now properly undoes multi-inspector drags.
- Pasting multiple connected nodes no longer crashes.
- Inspectors can now be dragged after pasting.
- Center window in screen on creation.
- Undo/redoing pasted connected nodes no longer crashes.
- Fixed graphical glitches in Add menu (caused by bad parenting)

antimony/doc/release-notes/0.7.1.md

Antimony 0.7.1
--------------

Features:

- Added `title` property (that can be set by Script node).
- Display `stdout`, errors in separate panes when editing script.
- Improved icon.
- Added polar iteration node, which rotates input about a point multiple times.
- Wrote basic usage guide

Bug fixes:

- When a node is deleted from a 3D view, undo/redo now re-creates connections

antimony/doc/release-notes/0.9.3.md

Antimony 0.9.3
--------------

## Features

- Use alt modifier to drag values faster
- Add 'Show math string' node
- Pre-emptively clip rendering to screen size (which should make rendering
  faster when zoomed in)
- Easier packaging for Debian (thanks @pfelecan)
- Flatter icon (thanks, @ApostolosB)

## Bugfixes:

- Tab and shift-tab now switch between datums like they used to (before 0.9.1)
- HSB color node is no longer black by default
- Fix extra images being drawn on load
- 'Hide UI' menu item actually does what it says

antimony/doc/release-notes/0.7.5.md

Antimony 0.7.5
--------------

This is a pretty minor release feature-wise, but it's got a few simple fixes
that will speed up performance when designing large models.

**Features:**

- More nodes (thanks, Neil!).
- Limit size of script output window.
- Adding `str` input type for scripting

**Bugfixes:**

- Cache certain types of lookups to make redrawing graph window faster.
- Improve node change detection to make adding new nodes faster.

antimony/doc/release-notes/0.7.6.md

Antimony 0.7.6
--------------

This is a major release in terms of architecture: All nodes are now defined as
scripts, and scripts now have the power to define UI primitives (points and
wireframes) that show up in 3D views.

Files saved with `0.7.5` and earlier will be automatically upgraded to using
script nodes (instead of hard-coded nodes).  This backwards compatibility will
be removed in later releases.  If you have a file for which automatic
upgrading fails, send it in as a bug report for further investigation.

**Features:**

- Added `fab.ui` namespace, with hooks to create 3D view objects from scripts.
    - `fab.ui.point` allows points with user-defined drag functions
    - `fab.ui.wireframe` creates user-defined wireframes
- Added `fab.color` namespace with a set of standard colors.
- Ported all hard-coded nodes into scripts.
- Skip early render stages if they are sufficiently fast.
- Show error message if Save As or Export target file isn't writable.
- New inspectors are placed in the center of a canvas if possible.
- Automatically upgrade hard-coded nodes in older files to scripts.

**Bugfixes:**

- Build issue: `sb` directory is now created before subfolders are copied.
- Off-by-one line highlighting error in script panes.
- Weird namespace issue in scripts.
- Remove white border in script UI (Linux).

antimony/doc/release-notes/0.7.3.md

Antimony 0.7.3
--------------

**Features:**

- User-defined script nodes are now auto-populated into `Add` menu.  They are
  stored in the [`nodes`](../../py/nodes) folder and categorized by subfolder.
- Optional third argument for `input` in scripts; it sets the starting value.
- Open files from the command-line with `antimony $FILENAME.sb`
- `make install` copies Antimony and `sb` data directory into `/usr/local/bin`
  (Linux only)
- Double-clicking a `.sb` file will open it in Antimony (Mac only)

**Bugfixes:**

- Fixed crash when loading files that contain connections.
- Fixed frequent crashes caused by reference counting on `Py_None` object.
- Change deletion of rendered images to fix strange crash.
- Fixed crash caused by loading file with Python errors.

antimony/doc/release-notes/0.7.8.md

Antimony 0.7.8
--------------

**Features:**

- Human-readable file format
    - Friendlier for version control, manual editing.
    - Older files will be automatically upgraded.
- `Set colour HSB` node (thanks, [RobotGrrl](https://github.com/mkeeter/antimony/pull/11))
- Huge speed-up in loading large files (about 20X).
- Minor solver speed-up using `restrict` keyword.

**Bugfixes:**

- Fixed memory leak in applying `Transform` objects.
- Don't spawn huge number of threads when loading complex files.
- If rendering is interrupted, don't leak images.
- Correct interface between `QGraphicsItem` and `OpenGL`.
- Fix *huge* leak of images and `OpenGL` textures.

antimony/doc/release-notes/0.7.4.md

Antimony 0.7.4
--------------

**Features:**

- User-defined script directories can be arbitrarily nested.
- Sort user-defined scripts by filename.
- Remove limits on output size.
- Add real-world units to export dialog box (`.png` only).
- Many more nodes ported from `fabserver` (thanks, Neil!).
- Dynamically re-order data values in node inspector.
- Window title reflects window type.
- Adding `str` type input for scripts.
- Axes are synchronized in quad view mode.

**Bugfixes:**

- Fixed crash due to decreasing borrowed reference.
- Improved highlighting and selection of bezier connections.
- Fixed crash related to serialization of scripts with errors.

antimony/doc/USAGE.md

User's Guide to Antimony
========================

Antimony is a tool for computer-aided design (CAD).

Window types
------------

There are three window types, each of which gives a different perspective on
the scene.

### Graph window

The graph window shows a graph representation of the current model.  Pan with
the left mouse button; zoom with the mouse wheel.

When you first start designing, the window should be empty.  Add new nodes
either with the *Add* menu or by pressing Shift+A.

Each node has a name and one or more data values.  The name is in the upper
left corner and can be edited; data values are inside of the box.

Each datum's text field is evaluated as a chunk of Python code.  These code
snippets are executed in a global namespace that includes nodes by name.

For example, given a Point node named `p0`, here are valid values for its `y`
datum:

- `1`
- `2 + 3`
- `p0.x * 2`

If a code snippet is invalid, the text field's border will turn red.  Hover
over it with the mouse to see the Python traceback.

Shift + left-click on a datum's text field and drag right or left to slide
values up and down.  Hold `alt` while doing so to adjust the value faster.

Datums can be connected using the I/O ports on the right and left.  Click and
drag on the right-hand port to start a connection; release the mouse button on
a left-hand port to finish it.  While dragging, pressing Spacebar will snap
the connection to the nearest valid port.

Any shape datum with an unconnected output port will be rendered.

### 3D viewport

The 3D viewport shows a 2D or 3D view of the current model.  It is rendered
and refined in real time.

Pan with the left mouse button; rotate with the right; zoom with the mouse
wheel.  The axis selector in the top right snaps the view to a particular
axis.  When looking along a main axis, the mouse pointer's coordinates are
shown in the bottom left.

As in the graph window, nodes can be added from the *Add* menu or with
Shift+A.

Certain types of nodes put controls into this viewport.  For example, the
default Circle node places a center-point and radius in the viewport.  These
controls can be dragged with the left mouse button.

If multiple controls are overlapping, right-clicking will open up a list and
one can be chosen to raise above the others.

### Script editor

A script editor is used to edit the Python code of a Script node.  To open
the script editor, add a Script node in the graph view then click on the icon
(three horizontal lines) in the top left of the inspector.

There are three panes in the editor window:

- The top pane is the script
- The middle pane is any output sent to `stdout` (e.g. with `print`)
- The bottom pane is any Python error that occurred during execution

The lower two panes only appear when needed (i.e. when something was sent to
`stdout` or an error occurred respectively).  When an error occurs, the
relevant line will be highlighted in red.

Exporting files
---------------

The export workflow in Antimony is a bit non-traditional.

To export a particular shape, create a node from the Export category then
connect the target shape to its input port.  If the shape is a valid target
for export, an export button will appear below the datums in the node;
clicking on this button will start the export task.

This means that multiple export tasks can be defined for different parts of a
model; details like resolution and even target filename can also be
hard-coded by editing the export node's script.

antimony/doc/SCRIPTING.md

Scripting in Antimony
=====================

Every Antimony node is implemented as a Python script.  Scripts are Python
3.x with a few extra functions and modules.

I/O functions
-------------

- `input(name, type)` creates an input datum of the given type and injects it
  into the namespace.  `name` must be a string and `type` must be either
  `float`, `int`, or `fab.types.Shape` (or simply `Shape` depending on how it
  was imported).
- `output(name, value)` creates an output datum with the given name and value
  (the type is automatically determined and must be either `float` or `Shape`)
- `title(value)` sets the title of the node, which is shown in a graph window
  in the upper-right corner of the node.
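
Taken together, these three calls are enough for a complete node script.  The
sketch below is a hypothetical example (not one of the bundled nodes); it
assumes the `circle` primitive from the standard `fab.shapes` library and the
optional third argument to `input` that sets a datum's starting value:

```python
# Minimal node script sketch: a circle with an editable center and radius.
import fab

title('Circle')

input('x', float)      # injects the name `x` into the script's namespace
input('y', float)
input('r', float, 1)   # optional third argument sets the starting value

# The output type is determined automatically; here it's a Shape.
output('shape', fab.shapes.circle(x, y, r))
```

Since the `shape` output starts out unconnected, it will be rendered in any
open 3D viewport.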

Note that `input` and `output` will create new rows in the graph view for the
datums that they add to the node.

`fab` module
------------

Python scripts will usually import the `fab` module, which has
Antimony-specific classes and types for designing shapes.

### `fab.shapes`

`fab.shapes` contains a standard library of primitives and transforms.  It is
populated from [shapes.py](../py/fab/shapes.py).

### `fab.types`

`fab.types` contains Python classes representing various Antimony types.

The most relevant is `fab.types.Shape`, which represents a 2D or 3D solid
body.  It is often used as the second argument to `input`, i.e.
`input('s', fab.types.Shape)` to declare a `Shape` input port.

Other types include `Bounds` (representing 2D or 3D bounds) and `Transform`
(representing a coordinate transform).

`sb` module
-----------

Antimony-specific behavior lives in the `sb` module.

### `sb.ui`

`sb.ui` contains magic functions that modify the user interface in 3D
viewports.  The relevant functions are `sb.ui.wireframe` and `sb.ui.point`.
Both have detailed docstrings and are used extensively in the standard node
library; check out existing nodes to get an idea of how they can be used.

### `sb.export`

This namespace has two relevant functions: `sb.export.stl` and
`sb.export.heightmap`.  When called, each will add an extra button to the
node's inspector; clicking on this button will run an export task.

Both export functions have extensive documentation; check out the existing
export nodes for an example of how they are used.

### `sb.color`

This namespace defines a set of standard colors for use in UI elements.
Colors are stored as 3-tuples of RGB values (0-255).

antimony/lib/fab/CMakeLists.txt

find_package(PNG REQUIRED)
find_package(FLEX REQUIRED)

################################################################################

# Custom command to generate the parser sources with lemon
if(WIN32)
    find_program(LEMON_EXECUTABLE lemon.exe DOC "path to the lemon executable")
else()
    find_program(LEMON_EXECUTABLE lemon DOC "path to the lemon executable")
endif()
mark_as_advanced(LEMON_EXECUTABLE)

add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.lemon.hpp
           ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.lemon.cpp
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/tree/v2syntax.y
    COMMAND ${LEMON_EXECUTABLE} -q -c -s
            ${CMAKE_CURRENT_SOURCE_DIR}/src/tree/v2syntax.y
    COMMAND mv ${CMAKE_CURRENT_SOURCE_DIR}/src/tree/v2syntax.c
               ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.lemon.cpp
    COMMAND mv ${CMAKE_CURRENT_SOURCE_DIR}/src/tree/v2syntax.h
               ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.lemon.hpp)

# Custom command to generate the lexer sources with flex
if(WIN32)
    find_program(FLEX_EXECUTABLE flex.exe DOC "path to the flex executable")
else()
    find_program(FLEX_EXECUTABLE flex DOC "path to the flex executable")
endif()
mark_as_advanced(FLEX_EXECUTABLE)

add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.yy.hpp
           ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.yy.cpp
    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/tree/v2syntax.l
    COMMAND ${FLEX_EXECUTABLE}
            --outfile=${CMAKE_CURRENT_BINARY_DIR}/v2syntax.yy.cpp
            --header-file=${CMAKE_CURRENT_BINARY_DIR}/v2syntax.yy.hpp
            ${CMAKE_CURRENT_SOURCE_DIR}/src/tree/v2syntax.l)

add_library(SbFab STATIC
    src/fab.cpp
    src/formats/png.c
    src/formats/stl.c
    src/tree/eval.c
    src/tree/math/math_f.c
    src/tree/math/math_g.c
    src/tree/math/math_i.c
    src/tree/math/math_r.c
    src/tree/node/node_c.c
    src/tree/node/opcodes.c
    src/tree/node/printers.c
    src/tree/node/printers_ss.cpp
    src/tree/node/results.c
    src/tree/parser.c
    src/tree/render.c
    src/tree/tree.c
    src/tree/v2parser.cpp
    src/tree/triangulate/mesher.cpp
    src/tree/triangulate/triangle.cpp
    src/tree/triangulate.cpp
    src/types/bounds.cpp
    src/types/shape.cpp
    src/types/transform.cpp
    src/util/region.c
    src/util/ustack.c

    # Generated files
    ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.lemon.cpp
    ${CMAKE_CURRENT_BINARY_DIR}/v2syntax.yy.cpp
)

target_include_directories(SbFab SYSTEM PRIVATE
    ${PYTHON_INCLUDE_DIRS}
    ${Boost_INCLUDE_DIRS}
    ${PNG_INCLUDE_DIR}
)

# Add the binary dir to include generated files
target_include_directories(SbFab PRIVATE
    ${CMAKE_CURRENT_BINARY_DIR}
)

target_link_libraries(SbFab
    ${Boost_LIBRARIES}
    ${PYTHON_LIBRARY}
    ${PNG_LIBRARIES}
)

target_include_directories(SbFab PUBLIC inc)
target_include_directories(SbFab SYSTEM PRIVATE vendor)

################################################################################

add_executable(SbFabTest
    tests/main.cpp
    tests/parser.cpp
    tests/shape.cpp
)

target_link_libraries(SbFabTest SbFab)
target_include_directories(SbFabTest SYSTEM PRIVATE
    ../../vendor
    ${Boost_INCLUDE_DIRS}
    ${PYTHON_INCLUDE_DIRS}
)

################################################################################

set_property(TARGET SbFab PROPERTY CXX_STANDARD 11)
set_property(TARGET SbFab PROPERTY C_STANDARD 99)
set_property(TARGET SbFabTest PROPERTY CXX_STANDARD 11)
set_property(TARGET SbFabTest PROPERTY C_STANDARD 99)

antimony/lib/fab/vendor/Eigen/CMakeLists.txt

include(RegexUtils)
test_escape_string_as_regex()

file(GLOB Eigen_directory_files "*")

escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")

foreach(f ${Eigen_directory_files})
  if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src")
    list(APPEND Eigen_directory_files_to_install ${f})
  endif()
endforeach(f ${Eigen_directory_files})

install(FILES
  ${Eigen_directory_files_to_install}
  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel
  )

add_subdirectory(src)

antimony/lib/fab/vendor/Eigen/Dense

#include "Core"
#include "LU"
#include "Cholesky"
#include "QR"
#include "SVD"
#include "Geometry"
#include "Eigenvalues"

antimony/lib/fab/vendor/Eigen/Sparse

#ifndef EIGEN_SPARSE_MODULE_H
#define EIGEN_SPARSE_MODULE_H

/** \defgroup Sparse_Module Sparse meta-module
  *
  * Meta-module including all related modules:
  * - \ref SparseCore_Module
  * - \ref OrderingMethods_Module
  * - \ref SparseCholesky_Module
  * - \ref SparseLU_Module
  * - \ref SparseQR_Module
  * - \ref IterativeLinearSolvers_Module
  *
  * \code
  * #include <Eigen/Sparse>
  * \endcode
  */

#include "SparseCore"
#include "OrderingMethods"
#include "SparseCholesky"
#include "SparseLU"
#include "SparseQR"
#include "IterativeLinearSolvers"

#endif // EIGEN_SPARSE_MODULE_H

antimony/lib/fab/vendor/Eigen/StdList

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Hauke Heibel
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_STDLIST_MODULE_H
#define EIGEN_STDLIST_MODULE_H

#include "Core"
#include <list>

#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */

#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)

#else

#include "src/StlSupport/StdList.h"

#endif

#endif // EIGEN_STDLIST_MODULE_H

antimony/lib/fab/vendor/Eigen/Eigen

#include "Dense"
//#include "Sparse"

antimony/lib/fab/vendor/Eigen/MetisSupport

#ifndef EIGEN_METISSUPPORT_MODULE_H
#define EIGEN_METISSUPPORT_MODULE_H

#include "SparseCore"

#include "src/Core/util/DisableStupidWarnings.h"

extern "C" {
#include <metis.h>
}

/** \ingroup Support_modules
  * \defgroup MetisSupport_Module MetisSupport module
  *
  * \code
  * #include <Eigen/MetisSupport>
  * \endcode
  * This module defines an interface to the METIS reordering package
  * (http://glaros.dtc.umn.edu/gkhome/views/metis). It can be used just as any
  * other built-in method as explained in \link OrderingMethods_Module here. \endlink
  */

#include "src/MetisSupport/MetisSupport.h"

#include "src/Core/util/ReenableStupidWarnings.h"

#endif // EIGEN_METISSUPPORT_MODULE_H

antimony/lib/fab/vendor/Eigen/Geometry

#ifndef EIGEN_GEOMETRY_MODULE_H
#define EIGEN_GEOMETRY_MODULE_H

#include "Core"

#include "src/Core/util/DisableStupidWarnings.h"

#include "SVD"
#include "LU"
#include <limits>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/** \defgroup Geometry_Module Geometry module
  *
  * This module provides support for:
  *  - fixed-size homogeneous transformations
  *  - translation, scaling, 2D and 3D rotations
  *  - quaternions
  *  - \ref MatrixBase::cross() "cross product"
  *  - \ref MatrixBase::unitOrthogonal() "orthognal vector generation"
  *  - some linear components: parametrized-lines and hyperplanes
  *
  * \code
  * #include <Eigen/Geometry>
  * \endcode
  */

#include "src/Geometry/OrthoMethods.h"
#include "src/Geometry/EulerAngles.h"

#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
  #include "src/Geometry/Homogeneous.h"
  #include "src/Geometry/RotationBase.h"
  #include "src/Geometry/Rotation2D.h"
  #include "src/Geometry/Quaternion.h"
  #include "src/Geometry/AngleAxis.h"
  #include "src/Geometry/Transform.h"
  #include "src/Geometry/Translation.h"
  #include "src/Geometry/Scaling.h"
  #include "src/Geometry/Hyperplane.h"
  #include "src/Geometry/ParametrizedLine.h"
  #include "src/Geometry/AlignedBox.h"
  #include "src/Geometry/Umeyama.h"

  #if defined EIGEN_VECTORIZE_SSE
    #include "src/Geometry/arch/Geometry_SSE.h"
  #endif
#endif

#ifdef EIGEN2_SUPPORT
  #include "src/Eigen2Support/Geometry/All.h"
#endif

#include "src/Core/util/ReenableStupidWarnings.h"

#endif // EIGEN_GEOMETRY_MODULE_H

/* vim: set filetype=cpp et sw=2 ts=2 ai: */

antimony/lib/fab/vendor/Eigen/COPYING.GPL

                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc.
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works.  By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.  We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors.  You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price.  Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights.  Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received.  You must make sure that they, too, receive or can get the source code.  And you must show them these terms so they know their rights.

Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software.  For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.

Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so.  This is fundamentally incompatible with the aim of protecting users' freedom to change the software.  The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable.  Therefore, we have designed this version of the GPL to prohibit the practice for those products.  If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents.  States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary.  To prevent this, the GPL assures that patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and modification follow.

                       TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this License.  Each licensee is addressed as "you".  "Licensees" and "recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. 
The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. 
This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. 
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. 
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. 
For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: <program> Copyright (C) <year> <name of author> This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>. antimony/lib/fab/vendor/Eigen/Jacobi0000644000175000017500000000120513341130426016633 0ustar tiagotiago#ifndef EIGEN_JACOBI_MODULE_H #define EIGEN_JACOBI_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Jacobi_Module Jacobi module * This module provides Jacobi and Givens rotations. * * \code * #include <Eigen/Jacobi> * \endcode * * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation: * - MatrixBase::applyOnTheLeft() * - MatrixBase::applyOnTheRight().
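* * A brief usage sketch (an illustrative addition, not part of the original header): a Givens
* rotation built with JacobiRotation::makeGivens() can zero the second entry of a 2-vector;
* the names `v` and `G` here are hypothetical.
* \code
* Eigen::Vector2f v(1.0f, 2.0f);
* Eigen::JacobiRotation<float> G;
* G.makeGivens(v.x(), v.y());
* v.applyOnTheLeft(0, 1, G.adjoint()); // v.y() is now (numerically) zero
* \endcode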
*/ #include "src/Jacobi/Jacobi.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_JACOBI_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ antimony/lib/fab/vendor/Eigen/SparseCore0000644000175000017500000000345213341130426017520 0ustar tiagotiago#ifndef EIGEN_SPARSECORE_MODULE_H #define EIGEN_SPARSECORE_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" #include #include #include #include #include /** * \defgroup SparseCore_Module SparseCore module * * This module provides a sparse matrix representation, and basic associatd matrix manipulations * and operations. * * See the \ref TutorialSparse "Sparse tutorial" * * \code * #include * \endcode * * This module depends on: Core. */ namespace Eigen { /** The type used to identify a general sparse storage. */ struct Sparse {}; } #include "src/SparseCore/SparseUtil.h" #include "src/SparseCore/SparseMatrixBase.h" #include "src/SparseCore/CompressedStorage.h" #include "src/SparseCore/AmbiVector.h" #include "src/SparseCore/SparseMatrix.h" #include "src/SparseCore/MappedSparseMatrix.h" #include "src/SparseCore/SparseVector.h" #include "src/SparseCore/SparseBlock.h" #include "src/SparseCore/SparseTranspose.h" #include "src/SparseCore/SparseCwiseUnaryOp.h" #include "src/SparseCore/SparseCwiseBinaryOp.h" #include "src/SparseCore/SparseDot.h" #include "src/SparseCore/SparsePermutation.h" #include "src/SparseCore/SparseRedux.h" #include "src/SparseCore/SparseFuzzy.h" #include "src/SparseCore/ConservativeSparseSparseProduct.h" #include "src/SparseCore/SparseSparseProductWithPruning.h" #include "src/SparseCore/SparseProduct.h" #include "src/SparseCore/SparseDenseProduct.h" #include "src/SparseCore/SparseDiagonalProduct.h" #include "src/SparseCore/SparseTriangularView.h" #include "src/SparseCore/SparseSelfAdjointView.h" #include "src/SparseCore/TriangularSolver.h" #include "src/SparseCore/SparseView.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSECORE_MODULE_H antimony/lib/fab/vendor/Eigen/Householder0000644000175000017500000000110413341130426017723 0ustar tiagotiago#ifndef EIGEN_HOUSEHOLDER_MODULE_H #define EIGEN_HOUSEHOLDER_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup Householder_Module Householder module * This module provides Householder transformations. * * \code * #include * \endcode */ #include "src/Householder/Householder.h" #include "src/Householder/HouseholderSequence.h" #include "src/Householder/BlockHouseholder.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_HOUSEHOLDER_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ antimony/lib/fab/vendor/Eigen/COPYING.README0000644000175000017500000000141313341130426017511 0ustar tiagotiagoEigen is primarily MPL2 licensed. See COPYING.MPL2 and these links: http://www.mozilla.org/MPL/2.0/ http://www.mozilla.org/MPL/2.0/FAQ.html Some files contain third-party code under BSD or LGPL licenses, whence the other COPYING.* files here. All the LGPL code is either LGPL 2.1-only, or LGPL 2.1-or-later. For this reason, the COPYING.LGPL file contains the LGPL 2.1 text. If you want to guarantee that the Eigen code that you are #including is licensed under the MPL2 and possibly more permissive licenses (like BSD), #define this preprocessor symbol: EIGEN_MPL2_ONLY For example, with most compilers, you could add this to your project CXXFLAGS: -DEIGEN_MPL2_ONLY This will cause a compilation error to be generated if you #include any code that is LGPL licensed. 
antimony/lib/fab/vendor/Eigen/SparseLU0000644000175000017500000000336013341130426017146 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSELU_MODULE_H #define EIGEN_SPARSELU_MODULE_H #include "SparseCore" /** * \defgroup SparseLU_Module SparseLU module * This module defines a supernodal factorization of general sparse matrices. * The code is fully optimized for supernode-panel updates with specialized kernels. * Please, see the documentation of the SparseLU class for more details. */ #include "src/misc/Solve.h" #include "src/misc/SparseSolve.h" // Ordering interface #include "OrderingMethods" #include "src/SparseLU/SparseLU_gemm_kernel.h" #include "src/SparseLU/SparseLU_Structs.h" #include "src/SparseLU/SparseLU_SupernodalMatrix.h" #include "src/SparseLU/SparseLUImpl.h" #include "src/SparseCore/SparseColEtree.h" #include "src/SparseLU/SparseLU_Memory.h" #include "src/SparseLU/SparseLU_heap_relax_snode.h" #include "src/SparseLU/SparseLU_relax_snode.h" #include "src/SparseLU/SparseLU_pivotL.h" #include "src/SparseLU/SparseLU_panel_dfs.h" #include "src/SparseLU/SparseLU_kernel_bmod.h" #include "src/SparseLU/SparseLU_panel_bmod.h" #include "src/SparseLU/SparseLU_column_dfs.h" #include "src/SparseLU/SparseLU_column_bmod.h" #include "src/SparseLU/SparseLU_copy_to_ucol.h" #include "src/SparseLU/SparseLU_pruneL.h" #include "src/SparseLU/SparseLU_Utils.h" #include "src/SparseLU/SparseLU.h" #endif // EIGEN_SPARSELU_MODULE_H antimony/lib/fab/vendor/Eigen/SVD0000644000175000017500000000153213341130426016103 0ustar tiagotiago#ifndef EIGEN_SVD_MODULE_H #define EIGEN_SVD_MODULE_H #include "QR" #include "Householder" #include "Jacobi" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup SVD_Module SVD module * * * * This module provides SVD decomposition for matrices (both real and complex). * This decomposition is accessible via the following MatrixBase method: * - MatrixBase::jacobiSvd() * * \code * #include <Eigen/SVD> * \endcode */ #include "src/misc/Solve.h" #include "src/SVD/JacobiSVD.h" #if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) #include "src/SVD/JacobiSVD_MKL.h" #endif #include "src/SVD/UpperBidiagonalization.h" #ifdef EIGEN2_SUPPORT #include "src/Eigen2Support/SVD.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SVD_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ antimony/lib/fab/vendor/Eigen/SparseQR0000644000175000017500000000173713341130426017156 0ustar tiagotiago#ifndef EIGEN_SPARSEQR_MODULE_H #define EIGEN_SPARSEQR_MODULE_H #include "SparseCore" #include "OrderingMethods" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup SparseQR_Module SparseQR module * \brief Provides QR decomposition for sparse matrices * * This module provides a simplicial version of the left-looking Sparse QR decomposition. * The columns of the input matrix should be reordered to limit the fill-in during the * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end. * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list * of built-in and external ordering methods.
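* * A usage sketch (an illustrative addition, not part of the original header; `A` is a
* hypothetical Eigen::SparseMatrix<double> and `b` a matching right-hand side):
* \code
* Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
* Eigen::VectorXd x = qr.solve(b);
* \endcode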
* * \code * #include <Eigen/SparseQR> * \endcode * * */ #include "src/misc/Solve.h" #include "src/misc/SparseSolve.h" #include "OrderingMethods" #include "src/SparseCore/SparseColEtree.h" #include "src/SparseQR/SparseQR.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif antimony/lib/fab/vendor/Eigen/PaStiXSupport0000644000175000017500000000267313341130426020223 0ustar tiagotiago#ifndef EIGEN_PASTIXSUPPORT_MODULE_H #define EIGEN_PASTIXSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" #include <complex.h> extern "C" { #include <pastix_nompi.h> #include <pastix.h> } #ifdef complex #undef complex #endif /** \ingroup Support_modules * \defgroup PaStiXSupport_Module PaStiXSupport module * * This module provides an interface to the PaSTiX library. * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver. * It provides the two following main factorization classes: * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization. * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization. * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern). * * \code * #include <Eigen/PaStiXSupport> * \endcode * * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies. * The dependencies depend on how PaSTiX has been compiled. * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task. * */ #include "src/misc/Solve.h" #include "src/misc/SparseSolve.h" #include "src/PaStiXSupport/PaStiXSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_PASTIXSUPPORT_MODULE_H antimony/lib/fab/vendor/Eigen/LeastSquares0000644000175000017500000000131013341130426020057 0ustar tiagotiago#ifndef EIGEN_REGRESSION_MODULE_H #define EIGEN_REGRESSION_MODULE_H #ifndef EIGEN2_SUPPORT #error LeastSquares is only available in Eigen2 support mode (define EIGEN2_SUPPORT) #endif // exclude from normal eigen3-only documentation #ifdef EIGEN2_SUPPORT #include "Core" #include "src/Core/util/DisableStupidWarnings.h" #include "Eigenvalues" #include "Geometry" /** \defgroup LeastSquares_Module LeastSquares module * This module provides linear regression and related features. * * \code * #include <Eigen/LeastSquares> * \endcode */ #include "src/Eigen2Support/LeastSquares.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN2_SUPPORT #endif // EIGEN_REGRESSION_MODULE_H antimony/lib/fab/vendor/Eigen/SPQRSupport0000644000175000017500000000167413341130426017634 0ustar tiagotiago#ifndef EIGEN_SPQRSUPPORT_MODULE_H #define EIGEN_SPQRSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" #include "SuiteSparseQR.hpp" /** \ingroup Support_modules * \defgroup SPQRSupport_Module SuiteSparseQR module * * This module provides an interface to the SPQR library, which is part of the suitesparse package. * * \code * #include <Eigen/SPQRSupport> * \endcode * * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...).
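* * A usage sketch (an illustrative addition, not part of the original header; `A` is a
* hypothetical Eigen::SparseMatrix<double> and `b` a matching right-hand side):
* \code
* Eigen::SPQR<Eigen::SparseMatrix<double> > spqr(A);
* Eigen::VectorXd x = spqr.solve(b);
* \endcode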
* For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules * */ #include "src/misc/Solve.h" #include "src/misc/SparseSolve.h" #include "src/CholmodSupport/CholmodSupport.h" #include "src/SPQRSupport/SuiteSparseQRSupport.h" #endif antimony/lib/fab/vendor/Eigen/Core0000644000175000017500000003103413341130426016337 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CORE_H #define EIGEN_CORE_H // first thing Eigen does: stop the compiler from committing suicide #include "src/Core/util/DisableStupidWarnings.h" // then include this file where all our macros are defined. It's really important to do it first because // it's where we do all the alignment settings (platform detection and honoring the user's will if he // defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization. #include "src/Core/util/Macros.h" // Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3) // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details. #if defined(__MINGW32__) && EIGEN_GNUC_AT_LEAST(4,6) #pragma GCC optimize ("-fno-ipa-cp-clone") #endif #include <complex> // this include file manages BLAS and MKL related macros // and inclusion of their respective header files #include "src/Core/util/MKL_support.h" // if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into // account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks #if !EIGEN_ALIGN #ifndef EIGEN_DONT_VECTORIZE #define EIGEN_DONT_VECTORIZE #endif #endif #ifdef _MSC_VER #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled #if (_MSC_VER >= 1500) // 2008 or later // Remember that usage of defined() in a #define is undefined by the standard. // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP. #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64) #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER #endif #endif #else // Remember that usage of defined() in a #define is undefined by the standard #if (defined __SSE2__) && ( (!defined __GNUC__) || (defined __INTEL_COMPILER) || EIGEN_GNUC_AT_LEAST(4,2) ) #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC #endif #endif #ifndef EIGEN_DONT_VECTORIZE #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER) // Defines symbols for compile-time detection of which instructions are // used. // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_SSE #define EIGEN_VECTORIZE_SSE2 // Detect sse3/ssse3/sse4: // gcc and icc defines __SSE3__, ... // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you // want to force the use of those instructions with msvc.
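// For instance (an illustrative note, not in the original source), an MSVC project could pass
// /DEIGEN_VECTORIZE_SSE3 on the compiler command line to opt in to the SSE3 code paths, since
// MSVC never defines an __SSE3__-style macro of its own.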
#ifdef __SSE3__ #define EIGEN_VECTORIZE_SSE3 #endif #ifdef __SSSE3__ #define EIGEN_VECTORIZE_SSSE3 #endif #ifdef __SSE4_1__ #define EIGEN_VECTORIZE_SSE4_1 #endif #ifdef __SSE4_2__ #define EIGEN_VECTORIZE_SSE4_2 #endif // include files // This extern "C" works around a MINGW-w64 compilation issue // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354 // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do). // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know; // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too. // notice that since these are C headers, the extern "C" is theoretically needed anyways. extern "C" { // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly. // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus: #if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110 #include <immintrin.h> #else #include <emmintrin.h> #include <xmmintrin.h> #ifdef EIGEN_VECTORIZE_SSE3 #include <pmmintrin.h> #endif #ifdef EIGEN_VECTORIZE_SSSE3 #include <tmmintrin.h> #endif #ifdef EIGEN_VECTORIZE_SSE4_1 #include <smmintrin.h> #endif #ifdef EIGEN_VECTORIZE_SSE4_2 #include <nmmintrin.h> #endif #endif } // end extern "C" #elif defined __ALTIVEC__ #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_ALTIVEC #include <altivec.h> // We need to #undef all these ugly tokens defined in <altivec.h> // => use __vector instead of vector #undef bool #undef vector #undef pixel #elif defined __ARM_NEON__ #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_NEON #include <arm_neon.h> #endif #endif #if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE) #define EIGEN_HAS_OPENMP #endif #ifdef EIGEN_HAS_OPENMP #include <omp.h> #endif // MSVC for windows mobile does not have the errno.h file #if !(defined(_MSC_VER) && defined(_WIN32_WCE)) && !defined(__ARMCC_VERSION) #define EIGEN_HAS_ERRNO #endif #ifdef EIGEN_HAS_ERRNO #include <cerrno> #endif #include <cstddef> #include <cstdlib> #include <cmath> #include <cassert> #include <functional> #include <iosfwd> #include <cstring> #include <string> #include <limits> #include <climits> // for CHAR_BIT // for min/max: #include <algorithm> // for outputting debug info #ifdef EIGEN_DEBUG_ASSIGN #include <iostream> #endif // required for __cpuid, needs to be included after cmath #if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE)) #include <intrin.h> #endif #if defined(_CPPUNWIND) || defined(__EXCEPTIONS) #define EIGEN_EXCEPTIONS #endif #ifdef EIGEN_EXCEPTIONS #include <new> #endif /** \brief Namespace containing all symbols from the %Eigen library.
*/ namespace Eigen { inline static const char *SimdInstructionSetsInUse(void) { #if defined(EIGEN_VECTORIZE_SSE4_2) return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; #elif defined(EIGEN_VECTORIZE_SSE4_1) return "SSE, SSE2, SSE3, SSSE3, SSE4.1"; #elif defined(EIGEN_VECTORIZE_SSSE3) return "SSE, SSE2, SSE3, SSSE3"; #elif defined(EIGEN_VECTORIZE_SSE3) return "SSE, SSE2, SSE3"; #elif defined(EIGEN_VECTORIZE_SSE2) return "SSE, SSE2"; #elif defined(EIGEN_VECTORIZE_ALTIVEC) return "AltiVec"; #elif defined(EIGEN_VECTORIZE_NEON) return "ARM NEON"; #else return "None"; #endif } } // end namespace Eigen #define STAGE10_FULL_EIGEN2_API 10 #define STAGE20_RESOLVE_API_CONFLICTS 20 #define STAGE30_FULL_EIGEN3_API 30 #define STAGE40_FULL_EIGEN3_STRICTNESS 40 #define STAGE99_NO_EIGEN2_SUPPORT 99 #if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS #define EIGEN2_SUPPORT #define EIGEN2_SUPPORT_STAGE STAGE40_FULL_EIGEN3_STRICTNESS #elif defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API #define EIGEN2_SUPPORT #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API #elif defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS #define EIGEN2_SUPPORT #define EIGEN2_SUPPORT_STAGE STAGE20_RESOLVE_API_CONFLICTS #elif defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API #define EIGEN2_SUPPORT #define EIGEN2_SUPPORT_STAGE STAGE10_FULL_EIGEN2_API #elif defined EIGEN2_SUPPORT // default to stage 3, that's what it's always meant #define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API #else #define EIGEN2_SUPPORT_STAGE STAGE99_NO_EIGEN2_SUPPORT #endif #ifdef EIGEN2_SUPPORT #undef minor #endif // we use size_t frequently and we'll never remember to prepend it with std:: everytime just to // ensure QNX/QCC support using std::size_t; // gcc 4.6.0 wants std:: for ptrdiff_t using std::ptrdiff_t; /** \defgroup Core_Module Core module * This is the main module of Eigen providing dense matrix and vector support * (both fixed and dynamic size) with all the features corresponding to a BLAS library * and much more... 
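* * As an aside (an illustrative addition, not part of the original documentation), the helper
* defined just above can report which SIMD kernels a given build actually uses:
* \code
* std::cout << Eigen::SimdInstructionSetsInUse() << std::endl;
* \endcode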
* * \code * #include <Eigen/Core> * \endcode */ #include "src/Core/util/Constants.h" #include "src/Core/util/ForwardDeclarations.h" #include "src/Core/util/Meta.h" #include "src/Core/util/StaticAssert.h" #include "src/Core/util/XprHelper.h" #include "src/Core/util/Memory.h" #include "src/Core/NumTraits.h" #include "src/Core/MathFunctions.h" #include "src/Core/GenericPacketMath.h" #if defined EIGEN_VECTORIZE_SSE #include "src/Core/arch/SSE/PacketMath.h" #include "src/Core/arch/SSE/MathFunctions.h" #include "src/Core/arch/SSE/Complex.h" #elif defined EIGEN_VECTORIZE_ALTIVEC #include "src/Core/arch/AltiVec/PacketMath.h" #include "src/Core/arch/AltiVec/Complex.h" #elif defined EIGEN_VECTORIZE_NEON #include "src/Core/arch/NEON/PacketMath.h" #include "src/Core/arch/NEON/Complex.h" #endif #include "src/Core/arch/Default/Settings.h" #include "src/Core/Functors.h" #include "src/Core/DenseCoeffsBase.h" #include "src/Core/DenseBase.h" #include "src/Core/MatrixBase.h" #include "src/Core/EigenBase.h" #ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 // at least confirmed with Doxygen 1.5.5 and 1.5.6 #include "src/Core/Assign.h" #endif #include "src/Core/util/BlasUtil.h" #include "src/Core/DenseStorage.h" #include "src/Core/NestByValue.h" #include "src/Core/ForceAlignedAccess.h" #include "src/Core/ReturnByValue.h" #include "src/Core/NoAlias.h" #include "src/Core/PlainObjectBase.h" #include "src/Core/Matrix.h" #include "src/Core/Array.h" #include "src/Core/CwiseBinaryOp.h" #include "src/Core/CwiseUnaryOp.h" #include "src/Core/CwiseNullaryOp.h" #include "src/Core/CwiseUnaryView.h" #include "src/Core/SelfCwiseBinaryOp.h" #include "src/Core/Dot.h" #include "src/Core/StableNorm.h" #include "src/Core/MapBase.h" #include "src/Core/Stride.h" #include "src/Core/Map.h" #include "src/Core/Block.h" #include "src/Core/VectorBlock.h" #include "src/Core/Ref.h" #include "src/Core/Transpose.h" #include "src/Core/DiagonalMatrix.h" #include "src/Core/Diagonal.h" #include "src/Core/DiagonalProduct.h" #include "src/Core/PermutationMatrix.h" #include "src/Core/Transpositions.h" #include "src/Core/Redux.h" #include "src/Core/Visitor.h" #include "src/Core/Fuzzy.h" #include "src/Core/IO.h" #include "src/Core/Swap.h" #include "src/Core/CommaInitializer.h" #include "src/Core/Flagged.h" #include "src/Core/ProductBase.h" #include "src/Core/GeneralProduct.h" #include "src/Core/TriangularMatrix.h" #include "src/Core/SelfAdjointView.h" #include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/Parallelizer.h" #include "src/Core/products/CoeffBasedProduct.h" #include "src/Core/products/GeneralMatrixVector.h" #include "src/Core/products/GeneralMatrixMatrix.h" #include "src/Core/SolveTriangular.h" #include "src/Core/products/GeneralMatrixMatrixTriangular.h" #include "src/Core/products/SelfadjointMatrixVector.h" #include "src/Core/products/SelfadjointMatrixMatrix.h" #include "src/Core/products/SelfadjointProduct.h" #include "src/Core/products/SelfadjointRank2Update.h" #include "src/Core/products/TriangularMatrixVector.h" #include "src/Core/products/TriangularMatrixMatrix.h" #include "src/Core/products/TriangularSolverMatrix.h" #include "src/Core/products/TriangularSolverVector.h" #include "src/Core/BandMatrix.h" #include "src/Core/CoreIterators.h" #include "src/Core/BooleanRedux.h" #include "src/Core/Select.h" #include "src/Core/VectorwiseOp.h" #include "src/Core/Random.h" #include "src/Core/Replicate.h" #include "src/Core/Reverse.h" #include "src/Core/ArrayBase.h" #include
"src/Core/ArrayWrapper.h" #ifdef EIGEN_USE_BLAS #include "src/Core/products/GeneralMatrixMatrix_MKL.h" #include "src/Core/products/GeneralMatrixVector_MKL.h" #include "src/Core/products/GeneralMatrixMatrixTriangular_MKL.h" #include "src/Core/products/SelfadjointMatrixMatrix_MKL.h" #include "src/Core/products/SelfadjointMatrixVector_MKL.h" #include "src/Core/products/TriangularMatrixMatrix_MKL.h" #include "src/Core/products/TriangularMatrixVector_MKL.h" #include "src/Core/products/TriangularSolverMatrix_MKL.h" #endif // EIGEN_USE_BLAS #ifdef EIGEN_USE_MKL_VML #include "src/Core/Assign_MKL.h" #endif #include "src/Core/GlobalFunctions.h" #include "src/Core/util/ReenableStupidWarnings.h" #ifdef EIGEN2_SUPPORT #include "Eigen2Support" #endif #endif // EIGEN_CORE_H antimony/lib/fab/vendor/Eigen/COPYING.BSD0000644000175000017500000000275413341130426017175 0ustar tiagotiago/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */antimony/lib/fab/vendor/Eigen/SuperLUSupport0000644000175000017500000000356013341130426020406 0ustar tiagotiago#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H #define EIGEN_SUPERLUSUPPORT_MODULE_H #include "SparseCore" #include "src/Core/util/DisableStupidWarnings.h" #ifdef EMPTY #define EIGEN_EMPTY_WAS_ALREADY_DEFINED #endif typedef int int_t; #include #include #include // slu_util.h defines a preprocessor token named EMPTY which is really polluting, // so we remove it in favor of a SUPERLU_EMPTY token. // If EMPTY was already defined then we don't undef it. #if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED) # undef EIGEN_EMPTY_WAS_ALREADY_DEFINED #elif defined(EMPTY) # undef EMPTY #endif #define SUPERLU_EMPTY (-1) namespace Eigen { struct SluMatrix; } /** \ingroup Support_modules * \defgroup SuperLUSupport_Module SuperLUSupport module * * This module provides an interface to the SuperLU library. * It provides the following factorization class: * - class SuperLU: a supernodal sequential LU factorization. * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods). 
* * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting. * * \code * #include <Eigen/SuperLUSupport> * \endcode * * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be linked to the superlu library and its dependencies. * The dependencies depend on how superlu has been compiled. * For a cmake based project, you can use our FindSuperLU.cmake module to help you in this task. * */ #include "src/misc/Solve.h" #include "src/misc/SparseSolve.h" #include "src/SuperLUSupport/SuperLUSupport.h" #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SUPERLUSUPPORT_MODULE_H antimony/lib/fab/vendor/Eigen/LU0000644000175000017500000000172713341130426015775 0ustar tiagotiago#ifndef EIGEN_LU_MODULE_H #define EIGEN_LU_MODULE_H #include "Core" #include "src/Core/util/DisableStupidWarnings.h" /** \defgroup LU_Module LU module * This module includes %LU decomposition and related notions such as matrix inversion and determinant. * This module defines the following MatrixBase methods: * - MatrixBase::inverse() * - MatrixBase::determinant() * * \code * #include <Eigen/LU> * \endcode */ #include "src/misc/Solve.h" #include "src/misc/Kernel.h" #include "src/misc/Image.h" #include "src/LU/FullPivLU.h" #include "src/LU/PartialPivLU.h" #ifdef EIGEN_USE_LAPACKE #include "src/LU/PartialPivLU_MKL.h" #endif #include "src/LU/Determinant.h" #include "src/LU/Inverse.h" #if defined EIGEN_VECTORIZE_SSE #include "src/LU/arch/Inverse_SSE.h" #endif #ifdef EIGEN2_SUPPORT #include "src/Eigen2Support/LU.h" #endif #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_LU_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ antimony/lib/fab/vendor/Eigen/src/0000755000175000017500000000000013341130426016312 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/CMakeLists.txt0000644000175000017500000000046413341130426021056 0ustar tiagotiagofile(GLOB Eigen_src_subdirectories "*") escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") foreach(f ${Eigen_src_subdirectories}) if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" ) add_subdirectory(${f}) endif() endforeach() antimony/lib/fab/vendor/Eigen/src/MetisSupport/0000755000175000017500000000000013341130426020770 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/MetisSupport/CMakeLists.txt0000644000175000017500000000024613341130426023532 0ustar tiagotiagoFILE(GLOB Eigen_MetisSupport_SRCS "*.h") INSTALL(FILES ${Eigen_MetisSupport_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/MetisSupport COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/MetisSupport/MetisSupport.h0000644000175000017500000001050013341130426023613 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef METIS_SUPPORT_H #define METIS_SUPPORT_H namespace Eigen { /** * Get the fill-reducing ordering from the METIS package * * If A is the original matrix and Ap is the permuted matrix, * the fill-reducing permutation is defined as follows : * Row (column) i of A is the matperm(i) row (column) of Ap.
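* * A usage sketch (an illustrative addition, not part of the original header; `A` is a
* hypothetical square Eigen::SparseMatrix<double>, and the binary must be linked to METIS):
* \code
* Eigen::MetisOrdering<int> ordering;
* Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
* ordering(A, perm);
* \endcode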
 * WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm)
 */
template <typename Index>
class MetisOrdering
{
public:
  typedef PermutationMatrix<Dynamic,Dynamic,Index> PermutationType;
  typedef Matrix<Index,Dynamic,1> IndexVector;

  template <typename MatrixType>
  void get_symmetrized_graph(const MatrixType& A)
  {
    Index m = A.cols();
    eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARE MATRICES");
    // Get the transpose of the input matrix
    MatrixType At = A.transpose();
    // Get the number of nonzero elements in each row/col of At+A
    Index TotNz = 0;
    IndexVector visited(m);
    visited.setConstant(-1);
    for (int j = 0; j < m; j++)
    {
      // Compute the union structure of A(j,:) and At(j,:)
      visited(j) = j; // Do not include the diagonal element
      // Get the nonzeros in row/column j of A
      for (typename MatrixType::InnerIterator it(A, j); it; ++it)
      {
        Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
        if (visited(idx) != j )
        {
          visited(idx) = j;
          ++TotNz;
        }
      }
      // Get the nonzeros in row/column j of At
      for (typename MatrixType::InnerIterator it(At, j); it; ++it)
      {
        Index idx = it.index();
        if(visited(idx) != j)
        {
          visited(idx) = j;
          ++TotNz;
        }
      }
    }
    // Reserve place for A + At
    m_indexPtr.resize(m+1);
    m_innerIndices.resize(TotNz);

    // Now compute the real adjacency list of each column/row
    visited.setConstant(-1);
    Index CurNz = 0;
    for (int j = 0; j < m; j++)
    {
      m_indexPtr(j) = CurNz;
      visited(j) = j; // Do not include the diagonal element
      // Add the pattern of row/column j of A to A+At
      for (typename MatrixType::InnerIterator it(A,j); it; ++it)
      {
        Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
        if (visited(idx) != j )
        {
          visited(idx) = j;
          m_innerIndices(CurNz) = idx;
          CurNz++;
        }
      }
      // Add the pattern of row/column j of At to A+At
      for (typename MatrixType::InnerIterator it(At, j); it; ++it)
      {
        Index idx = it.index();
        if(visited(idx) != j)
        {
          visited(idx) = j;
          m_innerIndices(CurNz) = idx;
          ++CurNz;
        }
      }
    }
    m_indexPtr(m) = CurNz;
  }

  template <typename MatrixType>
  void operator() (const MatrixType& A, PermutationType& matperm)
  {
    Index m = A.cols();
    IndexVector perm(m),iperm(m);
    // First, symmetrize the matrix graph.
    get_symmetrized_graph(A);
    int output_error;

    // Call the fill-reducing routine from METIS
    output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data());

    if(output_error != METIS_OK)
    {
      //FIXME The ordering interface should define a class of possible errors
      std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n";
      return;
    }

    // Get the fill-reducing permutation
    //NOTE: If Ap is the permuted matrix then perm and iperm vectors are defined as follows
    // Row (column) i of Ap is the perm(i) row (column) of A, and row (column) i of A is the iperm(i) row (column) of Ap

    matperm.resize(m);
    for (int j = 0; j < m; j++)
      matperm.indices()(iperm(j)) = j;
  }

protected:
  IndexVector m_indexPtr;     // Pointer to the adjacency list of each row/column
  IndexVector m_innerIndices; // Adjacency list
};

} // end namespace Eigen
#endif
antimony/lib/fab/vendor/Eigen/src/misc/0000755000175000017500000000000013341130426017245 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/misc/CMakeLists.txt0000644000175000017500000000021513341130426022003 0ustar tiagotiagoFILE(GLOB Eigen_misc_SRCS "*.h")

INSTALL(FILES
  ${Eigen_misc_SRCS}
  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/misc COMPONENT Devel
  )
antimony/lib/fab/vendor/Eigen/src/misc/blas.h0000644000175000017500000010150313341130426020337 0ustar tiagotiago#ifndef BLAS_H
#define BLAS_H

#ifdef __cplusplus
extern "C"
{
#endif

#define BLASFUNC(FUNC) FUNC##_

#ifdef __WIN64__
typedef long long BLASLONG;
typedef unsigned long long BLASULONG;
#else
typedef long BLASLONG;
typedef unsigned long BLASULONG;
#endif

int BLASFUNC(xerbla)(const char *, int *info, int);

float BLASFUNC(sdot) (int *, float *, int *, float *, int *);
float BLASFUNC(sdsdot)(int *, float *, float *, int *, float *, int *);

double BLASFUNC(dsdot) (int *, float *, int *, float *, int *);
double BLASFUNC(ddot) (int *, double *, int *, double *, int *);
double BLASFUNC(qdot) (int *, double *, int *, double *, int *);

int BLASFUNC(cdotuw) (int *, float *, int *, float *, int *, float*);
int BLASFUNC(cdotcw) (int *, float *, int *, float *, int *, float*);
int BLASFUNC(zdotuw) (int *, double *, int *, double *, int *, double*);
int BLASFUNC(zdotcw) (int *, double *, int *, double *, int *, double*);

int BLASFUNC(saxpy) (int *, float *, float *, int *, float *, int *);
int BLASFUNC(daxpy) (int *, double *, double *, int *, double *, int *);
int BLASFUNC(qaxpy) (int *, double *, double *, int *, double *, int *);
int BLASFUNC(caxpy) (int *, float *, float *, int *, float *, int *);
int BLASFUNC(zaxpy) (int *, double *, double *, int *, double *, int *);
int BLASFUNC(xaxpy) (int *, double *, double *, int *, double *, int *);
int BLASFUNC(caxpyc)(int *, float *, float *, int *, float *, int *);
int BLASFUNC(zaxpyc)(int *, double *, double *, int *, double *, int *);
int BLASFUNC(xaxpyc)(int *, double *, double *, int *, double *, int *);

int BLASFUNC(scopy) (int *, float *, int *, float *, int *);
int BLASFUNC(dcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(qcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(ccopy) (int *, float *, int *, float *, int *);
int BLASFUNC(zcopy) (int *, double *, int *, double *, int *);
int BLASFUNC(xcopy) (int *, double *, int *, double *, int *);

int BLASFUNC(sswap) (int *, float *, int *, float *, int *);
int BLASFUNC(dswap) (int *, double *, int *, double *, int *);
int BLASFUNC(qswap) (int *, double *, int *, double *, int *);
int BLASFUNC(cswap) (int *, float *, int *, float *, int *);
int BLASFUNC(zswap) (int *,
double *, int *, double *, int *); int BLASFUNC(xswap) (int *, double *, int *, double *, int *); float BLASFUNC(sasum) (int *, float *, int *); float BLASFUNC(scasum)(int *, float *, int *); double BLASFUNC(dasum) (int *, double *, int *); double BLASFUNC(qasum) (int *, double *, int *); double BLASFUNC(dzasum)(int *, double *, int *); double BLASFUNC(qxasum)(int *, double *, int *); int BLASFUNC(isamax)(int *, float *, int *); int BLASFUNC(idamax)(int *, double *, int *); int BLASFUNC(iqamax)(int *, double *, int *); int BLASFUNC(icamax)(int *, float *, int *); int BLASFUNC(izamax)(int *, double *, int *); int BLASFUNC(ixamax)(int *, double *, int *); int BLASFUNC(ismax) (int *, float *, int *); int BLASFUNC(idmax) (int *, double *, int *); int BLASFUNC(iqmax) (int *, double *, int *); int BLASFUNC(icmax) (int *, float *, int *); int BLASFUNC(izmax) (int *, double *, int *); int BLASFUNC(ixmax) (int *, double *, int *); int BLASFUNC(isamin)(int *, float *, int *); int BLASFUNC(idamin)(int *, double *, int *); int BLASFUNC(iqamin)(int *, double *, int *); int BLASFUNC(icamin)(int *, float *, int *); int BLASFUNC(izamin)(int *, double *, int *); int BLASFUNC(ixamin)(int *, double *, int *); int BLASFUNC(ismin)(int *, float *, int *); int BLASFUNC(idmin)(int *, double *, int *); int BLASFUNC(iqmin)(int *, double *, int *); int BLASFUNC(icmin)(int *, float *, int *); int BLASFUNC(izmin)(int *, double *, int *); int BLASFUNC(ixmin)(int *, double *, int *); float BLASFUNC(samax) (int *, float *, int *); double BLASFUNC(damax) (int *, double *, int *); double BLASFUNC(qamax) (int *, double *, int *); float BLASFUNC(scamax)(int *, float *, int *); double BLASFUNC(dzamax)(int *, double *, int *); double BLASFUNC(qxamax)(int *, double *, int *); float BLASFUNC(samin) (int *, float *, int *); double BLASFUNC(damin) (int *, double *, int *); double BLASFUNC(qamin) (int *, double *, int *); float BLASFUNC(scamin)(int *, float *, int *); double BLASFUNC(dzamin)(int *, double *, int *); double BLASFUNC(qxamin)(int *, double *, int *); float BLASFUNC(smax) (int *, float *, int *); double BLASFUNC(dmax) (int *, double *, int *); double BLASFUNC(qmax) (int *, double *, int *); float BLASFUNC(scmax) (int *, float *, int *); double BLASFUNC(dzmax) (int *, double *, int *); double BLASFUNC(qxmax) (int *, double *, int *); float BLASFUNC(smin) (int *, float *, int *); double BLASFUNC(dmin) (int *, double *, int *); double BLASFUNC(qmin) (int *, double *, int *); float BLASFUNC(scmin) (int *, float *, int *); double BLASFUNC(dzmin) (int *, double *, int *); double BLASFUNC(qxmin) (int *, double *, int *); int BLASFUNC(sscal) (int *, float *, float *, int *); int BLASFUNC(dscal) (int *, double *, double *, int *); int BLASFUNC(qscal) (int *, double *, double *, int *); int BLASFUNC(cscal) (int *, float *, float *, int *); int BLASFUNC(zscal) (int *, double *, double *, int *); int BLASFUNC(xscal) (int *, double *, double *, int *); int BLASFUNC(csscal)(int *, float *, float *, int *); int BLASFUNC(zdscal)(int *, double *, double *, int *); int BLASFUNC(xqscal)(int *, double *, double *, int *); float BLASFUNC(snrm2) (int *, float *, int *); float BLASFUNC(scnrm2)(int *, float *, int *); double BLASFUNC(dnrm2) (int *, double *, int *); double BLASFUNC(qnrm2) (int *, double *, int *); double BLASFUNC(dznrm2)(int *, double *, int *); double BLASFUNC(qxnrm2)(int *, double *, int *); int BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *); int BLASFUNC(drot) (int *, double *, int *, double *, 
int *, double *, double *); int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *); int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *); int BLASFUNC(srotg) (float *, float *, float *, float *); int BLASFUNC(drotg) (double *, double *, double *, double *); int BLASFUNC(qrotg) (double *, double *, double *, double *); int BLASFUNC(crotg) (float *, float *, float *, float *); int BLASFUNC(zrotg) (double *, double *, double *, double *); int BLASFUNC(xrotg) (double *, double *, double *, double *); int BLASFUNC(srotmg)(float *, float *, float *, float *, float *); int BLASFUNC(drotmg)(double *, double *, double *, double *, double *); int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *); int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *); int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *); /* Level 2 routines */ int BLASFUNC(sger)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(dger)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(qger)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(cgeru)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(cgerc)(int *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zgeru)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(zgerc)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xgeru)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xgerc)(int *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(sgemv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(strsv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(dtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(qtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(ctrsv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(ztrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(xtrsv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(xtpsv) 
(char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(strmv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(dtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(qtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(ctrmv) (char *, char *, char *, int *, float *, int *, float *, int *); int BLASFUNC(ztrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(xtrmv) (char *, char *, char *, int *, double *, int *, double *, int *); int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *); int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *); int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); int BLASFUNC(ssymv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(sspmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(dspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(qspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(cspmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xspmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(ssyr) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dsyr) (char *, int *, double *, double *, int *, double 
*, int *); int BLASFUNC(qsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(csyr) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xsyr) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ssyr2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(dsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(qsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(csyr2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xsyr2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(sspr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(dspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(qspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(cspr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(zspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(xspr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(sspr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(dspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(qspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(cspr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(zspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(xspr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(cher) (char *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zher) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xher) (char *, int *, double *, double *, int *, double *, int *); int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *); int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *); int BLASFUNC(cher2) (char *, int *, float *, float *, int *, float *, int *, float *, int *); int BLASFUNC(zher2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(xher2) (char *, int *, double *, double *, int *, double *, int *, double *, int *); int BLASFUNC(chpr2) (char *, int *, float *, float *, int *, float *, int *, float *); int BLASFUNC(zhpr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(xhpr2) (char *, int *, double *, double *, int *, double *, int *, double *); int BLASFUNC(chemv) (char *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemv) (char *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chpmv) (char *, int *, float *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zhpmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xhpmv) (char *, int *, double *, double *, double *, int *, double *, double *, int *); int BLASFUNC(snorm)(char *, int *, int *, float 
*, int *); int BLASFUNC(dnorm)(char *, int *, int *, double *, int *); int BLASFUNC(cnorm)(char *, int *, int *, float *, int *); int BLASFUNC(znorm)(char *, int *, int *, double *, int *); int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); /* Level 3 routines */ int BLASFUNC(sgemm)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemm)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemm)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zge2mm)(char *, char *, 
char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(strsm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ctrsm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(ztrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xtrsm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(strmm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(qtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ctrmm)(char *, char *, char *, char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(ztrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(xtrmm)(char *, char *, char *, char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(ssymm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(qsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(ssyrk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(dsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(qsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(csyrk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xsyrk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(ssyr2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(dsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(qsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(csyr2k)(char *, char *, int *, int *, float *, float 
*, int *, float *, int *, float *, float *, int *); int BLASFUNC(zsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xsyr2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(chemm)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemm)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *); int BLASFUNC(cherk)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *); int BLASFUNC(zherk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(xherk)(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); int BLASFUNC(cher2k)(char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zher2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xher2k)(char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(cher2m)(char *, char *, char *, int *, int *, float *, float *, int *, float *, int *, float *, float *, int *); int BLASFUNC(zher2m)(char *, char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(xher2m)(char *, char *, char *, int *, int *, double *, double *, int *, double*, int *, double *, double *, int *); int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *, double *, int *); int BLASFUNC(sgema)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgema)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(cgema)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgema)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(sgems)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(dgems)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(cgems)(char *, char *, int *, int *, float *, float *, int *, float *, float *, int *, float *, int *); int BLASFUNC(zgems)(char *, char *, int *, int *, double *, double *, int *, double*, double *, int *, double*, int *); int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *); int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(qgetf2)(int *, int *, double *, int 
*, int *, int *); int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, int *); int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *); int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *); int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *); int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *); int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *); int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *); int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *); int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *); int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *); int BLASFUNC(spotf2)(char *, int *, float *, int *, int *); int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *); int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *); int BLASFUNC(spotrf)(char *, int *, float *, int *, int *); int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *); int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *); int BLASFUNC(slauu2)(char *, int *, float *, int *, int *); int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(clauu2)(char *, int *, float *, int *, int *); int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *); int BLASFUNC(slauum)(char *, int *, float *, int *, int *); int BLASFUNC(dlauum)(char *, int *, double *, int *, int *); int BLASFUNC(qlauum)(char *, int *, double *, int *, int *); int BLASFUNC(clauum)(char *, int *, float *, int *, int *); int BLASFUNC(zlauum)(char *, int *, double *, int *, int *); int BLASFUNC(xlauum)(char *, int *, double *, int *, int *); 
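/* Added commentary (not part of the upstream header): every routine declared
   above is a Fortran-style BLAS/LAPACK symbol, so each argument -- including
   scalars and sizes -- is passed by pointer, and BLASFUNC appends the trailing
   underscore used by Fortran name mangling. A minimal calling sketch, assuming
   the binary is linked against a Fortran BLAS (kept compiled out behind a
   hypothetical guard, for illustration only): */
#ifdef BLAS_H_USAGE_EXAMPLE
static double example_dot_product(void)
{
  double x[3] = {1.0, 2.0, 3.0};
  double y[3] = {4.0, 5.0, 6.0};
  int n = 3, incx = 1, incy = 1;
  /* ddot_ computes x . y = 1*4 + 2*5 + 3*6 = 32.0 */
  return BLASFUNC(ddot)(&n, x, &incx, y, &incy);
}
#endif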
int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *); int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *); int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *); int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *); int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *); int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *); int BLASFUNC(spotri)(char *, int *, float *, int *, int *); int BLASFUNC(dpotri)(char *, int *, double *, int *, int *); int BLASFUNC(qpotri)(char *, int *, double *, int *, int *); int BLASFUNC(cpotri)(char *, int *, float *, int *, int *); int BLASFUNC(zpotri)(char *, int *, double *, int *, int *); int BLASFUNC(xpotri)(char *, int *, double *, int *, int *); #ifdef __cplusplus } #endif #endif antimony/lib/fab/vendor/Eigen/src/misc/Solve.h0000644000175000017500000000466513341130426020521 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_SOLVE_H #define EIGEN_MISC_SOLVE_H namespace Eigen { namespace internal { /** \class solve_retval_base * */ template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix ReturnType; }; template struct solve_retval_base : public ReturnByValue > { typedef typename remove_all::type RhsNestedCleaned; typedef _DecompositionType DecompositionType; typedef ReturnByValue Base; typedef typename Base::Index Index; solve_retval_base(const DecompositionType& dec, const Rhs& rhs) : m_dec(dec), m_rhs(rhs) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_rhs.cols(); } inline const DecompositionType& dec() const { return m_dec; } inline const RhsNestedCleaned& rhs() const { return m_rhs; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; typename Rhs::Nested m_rhs; }; } // end namespace internal #define EIGEN_MAKE_SOLVE_HELPERS(DecompositionType,Rhs) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::solve_retval_base Base; \ using Base::dec; \ using Base::rhs; \ using Base::rows; \ using Base::cols; \ solve_retval(const DecompositionType& dec, const Rhs& rhs) \ : Base(dec, rhs) {} } // end namespace Eigen #endif // EIGEN_MISC_SOLVE_H antimony/lib/fab/vendor/Eigen/src/misc/Kernel.h0000644000175000017500000000540113341130426020636 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_KERNEL_H #define EIGEN_MISC_KERNEL_H namespace Eigen { namespace internal { /** \class kernel_retval_base * */ template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" // is the number of cols of the original matrix // so that the product "matrix * kernel = zero" makes sense Dynamic, // we don't know at compile-time the dimension of the kernel MatrixType::Options, MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, // whose dimension is the number of columns of the original matrix > ReturnType; }; template struct kernel_retval_base : public ReturnByValue > { typedef _DecompositionType DecompositionType; typedef ReturnByValue Base; typedef typename Base::Index Index; kernel_retval_base(const DecompositionType& dec) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_cols; } inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; Index m_rank, m_cols; }; } // end namespace internal #define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::kernel_retval_base Base; \ using Base::dec; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ kernel_retval(const DecompositionType& dec) : Base(dec) {} } // end namespace Eigen #endif // EIGEN_MISC_KERNEL_H antimony/lib/fab/vendor/Eigen/src/misc/SparseSolve.h0000644000175000017500000001113113341130426021661 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
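// Added commentary (not part of the upstream sources): Solve.h and Kernel.h
// above implement Eigen's lazy return-by-value pattern -- dec.solve(b) and
// lu.kernel() build small *_retval expression objects that evaluate into their
// destination only on assignment. A hedged sketch of the user-facing API built
// on top of them (illustration only, kept compiled out):
#if 0
#include <Eigen/LU>
static void example_solve_and_kernel()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(3);
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::VectorXd x = lu.solve(b);   // assignment evaluates a solve_retval
  Eigen::MatrixXd k = lu.kernel();   // assignment evaluates a kernel_retval
}
#endif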
#ifndef EIGEN_SPARSE_SOLVE_H
#define EIGEN_SPARSE_SOLVE_H

namespace Eigen {

namespace internal {

template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base;
template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval;

template<typename DecompositionType, typename Rhs>
struct traits<sparse_solve_retval_base<DecompositionType, Rhs> >
{
  typedef typename DecompositionType::MatrixType MatrixType;
  typedef SparseMatrix<typename Rhs::Scalar, Rhs::Options, typename Rhs::Index> ReturnType;
};

template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base
 : public ReturnByValue<sparse_solve_retval_base<_DecompositionType, Rhs> >
{
  typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
  typedef _DecompositionType DecompositionType;
  typedef ReturnByValue<sparse_solve_retval_base> Base;
  typedef typename Base::Index Index;

  sparse_solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
    : m_dec(dec), m_rhs(rhs)
  {}

  inline Index rows() const { return m_dec.cols(); }
  inline Index cols() const { return m_rhs.cols(); }
  inline const DecompositionType& dec() const { return m_dec; }
  inline const RhsNestedCleaned& rhs() const { return m_rhs; }

  template<typename Dest> inline void evalTo(Dest& dst) const
  {
    static_cast<const sparse_solve_retval<DecompositionType,Rhs>*>(this)->evalTo(dst);
  }

protected:
  template<typename DestScalar, int DestOptions, typename DestIndex>
  inline void defaultEvalTo(SparseMatrix<DestScalar,DestOptions,DestIndex>& dst) const
  {
    // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
    static const int NbColsAtOnce = 4;
    int rhsCols = m_rhs.cols();
    int size = m_rhs.rows();
    Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
    Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,rhsCols);
    for(int k=0; k<rhsCols; k+=NbColsAtOnce)
    {
      int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
      tmp.leftCols(actualCols) = m_rhs.middleCols(k,actualCols);
      tmpX.leftCols(actualCols) = m_dec.solve(tmp.leftCols(actualCols));
      dst.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
    }
  }
  const DecompositionType& m_dec;
  typename Rhs::Nested m_rhs;
};

#define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \
  typedef typename DecompositionType::MatrixType MatrixType; \
  typedef typename MatrixType::Scalar Scalar; \
  typedef typename MatrixType::RealScalar RealScalar; \
  typedef typename MatrixType::Index Index; \
  typedef Eigen::internal::sparse_solve_retval_base<DecompositionType,Rhs> Base; \
  using Base::dec; \
  using Base::rhs; \
  using Base::rows; \
  using Base::cols; \
  sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \
    : Base(dec, rhs) {}

template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess;

template<typename DecompositionType, typename Rhs, typename Guess>
struct traits<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
{
  typedef typename DecompositionType::MatrixType MatrixType;
  typedef Matrix<typename Rhs::Scalar,
                 MatrixType::ColsAtCompileTime,
                 Rhs::ColsAtCompileTime,
                 Rhs::PlainObject::Options,
                 MatrixType::MaxColsAtCompileTime,
                 Rhs::MaxColsAtCompileTime> ReturnType;
};

template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess
 : public ReturnByValue<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
{
  typedef typename DecompositionType::Index Index;

  solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess)
    : m_dec(dec), m_rhs(rhs), m_guess(guess)
  {}

  inline Index rows() const { return m_dec.cols(); }
  inline Index cols() const { return m_rhs.cols(); }

  template<typename Dest> inline void evalTo(Dest& dst) const
  {
    dst = m_guess;
    m_dec._solveWithGuess(m_rhs,dst);
  }

protected:
  const DecompositionType& m_dec;
  const typename Rhs::Nested m_rhs;
  const typename Guess::Nested m_guess;
};

} // namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SOLVE_H
antimony/lib/fab/vendor/Eigen/src/misc/Image.h0000644000175000017500000000566513341130426020444 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
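// Added commentary (not part of the upstream sources): sparse_solve_retval_base
// in SparseSolve.h above stages a sparse right-hand side through dense blocks of
// four columns (NbColsAtOnce) and re-sparsifies the result with sparseView().
// A hedged sketch of what that enables at the API level (illustration only):
#if 0
#include <Eigen/SparseLU>
static void example_sparse_rhs_solve(const Eigen::SparseMatrix<double>& A,
                                     const Eigen::SparseMatrix<double>& B)
{
  Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);
  // X is computed block-by-block through the defaultEvalTo() loop above.
  Eigen::SparseMatrix<double> X = lu.solve(B);
}
#endif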
#ifndef EIGEN_MISC_IMAGE_H #define EIGEN_MISC_IMAGE_H namespace Eigen { namespace internal { /** \class image_retval_base * */ template struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< typename MatrixType::Scalar, MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose // dimension is the number of rows of the original matrix Dynamic, // we don't know at compile time the dimension of the image (the rank) MatrixType::Options, MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix, MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns. > ReturnType; }; template struct image_retval_base : public ReturnByValue > { typedef _DecompositionType DecompositionType; typedef typename DecompositionType::MatrixType MatrixType; typedef ReturnByValue Base; typedef typename Base::Index Index; image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank == 0 ? 1 : m_rank), m_originalMatrix(originalMatrix) {} inline Index rows() const { return m_dec.rows(); } inline Index cols() const { return m_cols; } inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } inline const MatrixType& originalMatrix() const { return m_originalMatrix; } template inline void evalTo(Dest& dst) const { static_cast*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; Index m_rank, m_cols; const MatrixType& m_originalMatrix; }; } // end namespace internal #define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ typedef Eigen::internal::image_retval_base Base; \ using Base::dec; \ using Base::originalMatrix; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \ : Base(dec, originalMatrix) {} } // end namespace Eigen #endif // EIGEN_MISC_IMAGE_H antimony/lib/fab/vendor/Eigen/src/Geometry/0000755000175000017500000000000013341130426020105 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/Geometry/CMakeLists.txt0000644000175000017500000000026113341130426022644 0ustar tiagotiagoFILE(GLOB Eigen_Geometry_SRCS "*.h") INSTALL(FILES ${Eigen_Geometry_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Geometry COMPONENT Devel ) ADD_SUBDIRECTORY(arch) antimony/lib/fab/vendor/Eigen/src/Geometry/OrthoMethods.h0000644000175000017500000002017613341130426022703 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ORTHOMETHODS_H
#define EIGEN_ORTHOMETHODS_H

namespace Eigen {

/** \geometry_module
  *
  * \returns the cross product of \c *this and \a other
  *
  * Here is a very good explanation of cross-product: http://xkcd.com/199/
  * \sa MatrixBase::cross3()
  */
template<typename Derived>
template<typename OtherDerived>
inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
{
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)

  // Note that there is no need for an expression here since the compiler
  // optimizes such a small temporary very well (even within a complex expression)
  typename internal::nested<Derived,2>::type lhs(derived());
  typename internal::nested<OtherDerived,2>::type rhs(other.derived());
  return typename cross_product_return_type<OtherDerived>::type(
    numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
    numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
    numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
  );
}

namespace internal {

template< int Arch,typename VectorLhs,typename VectorRhs,
          typename Scalar = typename VectorLhs::Scalar,
          bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)>
struct cross3_impl {
  static inline typename internal::plain_matrix_type<VectorLhs>::type
  run(const VectorLhs& lhs, const VectorRhs& rhs)
  {
    return typename internal::plain_matrix_type<VectorLhs>::type(
      numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
      numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
      numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
      0
    );
  }
};

}

/** \geometry_module
  *
  * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients
  *
  * The size of \c *this and \a other must be four. This function is especially useful
  * when using 4D vectors instead of 3D ones to take advantage of SSE/AltiVec vectorization.
  *
  * \sa MatrixBase::cross()
  */
template<typename Derived>
template<typename OtherDerived>
inline typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
{
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4)

  typedef typename internal::nested<Derived,2>::type DerivedNested;
  typedef typename internal::nested<OtherDerived,2>::type OtherDerivedNested;
  DerivedNested lhs(derived());
  OtherDerivedNested rhs(other.derived());

  return internal::cross3_impl<Architecture::Target,
                               typename internal::remove_all<DerivedNested>::type,
                               typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs);
}

/** \returns a matrix expression of the cross product of each column or row
  * of the referenced expression with the \a other vector.
  *
  * The referenced matrix must have one dimension equal to 3.
  * The result matrix has the same dimensions as the referenced one.
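  *
  * A small usage sketch (added commentary, not part of the upstream documentation):
  * \code
  * Eigen::Matrix3f m = Eigen::Matrix3f::Identity();
  * Eigen::Vector3f v(0.f, 0.f, 1.f);
  * Eigen::Matrix3f c = m.colwise().cross(v); // each column of m crossed with v
  * \endcode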
* * \geometry_module * * \sa MatrixBase::cross() */ template template const typename VectorwiseOp::CrossReturnType VectorwiseOp::cross(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) CrossReturnType res(_expression().rows(),_expression().cols()); if(Direction==Vertical) { eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows"); res.row(0) = (_expression().row(1) * other.coeff(2) - _expression().row(2) * other.coeff(1)).conjugate(); res.row(1) = (_expression().row(2) * other.coeff(0) - _expression().row(0) * other.coeff(2)).conjugate(); res.row(2) = (_expression().row(0) * other.coeff(1) - _expression().row(1) * other.coeff(0)).conjugate(); } else { eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns"); res.col(0) = (_expression().col(1) * other.coeff(2) - _expression().col(2) * other.coeff(1)).conjugate(); res.col(1) = (_expression().col(2) * other.coeff(0) - _expression().col(0) * other.coeff(2)).conjugate(); res.col(2) = (_expression().col(0) * other.coeff(1) - _expression().col(1) * other.coeff(0)).conjugate(); } return res; } namespace internal { template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename Derived::Index Index; typedef Matrix Vector2; static inline VectorType run(const Derived& src) { VectorType perp = VectorType::Zero(src.size()); Index maxi = 0; Index sndi = 0; src.cwiseAbs().maxCoeff(&maxi); if (maxi==0) sndi = 1; RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm(); perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm; perp.coeffRef(sndi) = numext::conj(src.coeff(maxi)) * invnm; return perp; } }; template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; static inline VectorType run(const Derived& src) { VectorType perp; /* Let us compute the crossed product of *this with a vector * that is not too close to being colinear to *this. */ /* unless the x and y coords are both close to zero, we can * simply take ( -y, x, 0 ) and normalize it. */ if((!isMuchSmallerThan(src.x(), src.z())) || (!isMuchSmallerThan(src.y(), src.z()))) { RealScalar invnm = RealScalar(1)/src.template head<2>().norm(); perp.coeffRef(0) = -numext::conj(src.y())*invnm; perp.coeffRef(1) = numext::conj(src.x())*invnm; perp.coeffRef(2) = 0; } /* if both x and y are close to zero, then the vector is close * to the z-axis, so it's far from colinear to the x-axis for instance. * So we take the crossed product with (1,0,0) and normalize it. */ else { RealScalar invnm = RealScalar(1)/src.template tail<2>().norm(); perp.coeffRef(0) = 0; perp.coeffRef(1) = -numext::conj(src.z())*invnm; perp.coeffRef(2) = numext::conj(src.y())*invnm; } return perp; } }; template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; static inline VectorType run(const Derived& src) { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); } }; } // end namespace internal /** \returns a unit vector which is orthogonal to \c *this * * The size of \c *this must be at least 2. 
  * If the size is exactly 2, then the returned vector is a counter-clockwise rotation of \c *this, i.e., (-y,x).normalized().
  *
  * \sa cross()
  */
template<typename Derived>
typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::unitOrthogonal() const
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return internal::unitOrthogonal_selector<Derived>::run(derived());
}

} // end namespace Eigen

#endif // EIGEN_ORTHOMETHODS_H
antimony/lib/fab/vendor/Eigen/src/Geometry/Translation.h0000644000175000017500000001604113341130426022556 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_TRANSLATION_H
#define EIGEN_TRANSLATION_H

namespace Eigen {

/** \geometry_module \ingroup Geometry_Module
  *
  * \class Translation
  *
  * \brief Represents a translation transformation
  *
  * \param _Scalar the scalar type, i.e., the type of the coefficients.
  * \param _Dim the dimension of the space, can be a compile time value or Dynamic
  *
  * \note This class is not aimed to be used to store a translation transformation,
  * but rather to simplify the construction and update of Transform objects.
  *
  * \sa class Scaling, class Transform
  */
template<typename _Scalar, int _Dim>
class Translation
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
  /** dimension of the space */
  enum { Dim = _Dim };
  /** the scalar type of the coefficients */
  typedef _Scalar Scalar;
  /** corresponding vector type */
  typedef Matrix<Scalar,Dim,1> VectorType;
  /** corresponding linear transformation matrix type */
  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
  /** corresponding affine transformation type */
  typedef Transform<Scalar,Dim,Affine> AffineTransformType;
  /** corresponding isometric transformation type */
  typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;

protected:

  VectorType m_coeffs;

public:

  /** Default constructor without initialization. */
  Translation() {}
  /** */
  inline Translation(const Scalar& sx, const Scalar& sy)
  {
    eigen_assert(Dim==2);
    m_coeffs.x() = sx;
    m_coeffs.y() = sy;
  }
  /** */
  inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
  {
    eigen_assert(Dim==3);
    m_coeffs.x() = sx;
    m_coeffs.y() = sy;
    m_coeffs.z() = sz;
  }
  /** Constructs and initializes the translation transformation from a vector of translation coefficients */
  explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}

  /** \brief Returns the x-translation by value. **/
  inline Scalar x() const { return m_coeffs.x(); }
  /** \brief Returns the y-translation by value. **/
  inline Scalar y() const { return m_coeffs.y(); }
  /** \brief Returns the z-translation by value. **/
  inline Scalar z() const { return m_coeffs.z(); }

  /** \brief Returns the x-translation as a reference. **/
  inline Scalar& x() { return m_coeffs.x(); }
  /** \brief Returns the y-translation as a reference. **/
  inline Scalar& y() { return m_coeffs.y(); }
  /** \brief Returns the z-translation as a reference.
    **/
  inline Scalar& z() { return m_coeffs.z(); }

  const VectorType& vector() const { return m_coeffs; }
  VectorType& vector() { return m_coeffs; }

  const VectorType& translation() const { return m_coeffs; }
  VectorType& translation() { return m_coeffs; }

  /** Concatenates two translations */
  inline Translation operator* (const Translation& other) const
  { return Translation(m_coeffs + other.m_coeffs); }

  /** Concatenates a translation and a uniform scaling */
  inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const;

  /** Concatenates a translation and a linear transformation */
  template<typename OtherDerived>
  inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;

  /** Concatenates a translation and a rotation */
  template<typename Derived>
  inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const
  { return *this * IsometryTransformType(r); }

  /** \returns the concatenation of a linear transformation \a l with the translation \a t */
  // it's a nightmare to define a templated friend function outside its declaration
  template<typename OtherDerived> friend
  inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)
  {
    AffineTransformType res;
    res.matrix().setZero();
    res.linear() = linear.derived();
    res.translation() = linear.derived() * t.m_coeffs;
    res.matrix().row(Dim).setZero();
    res(Dim,Dim) = Scalar(1);
    return res;
  }

  /** Concatenates a translation and a transformation */
  template<int Mode, int Options>
  inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const
  {
    Transform<Scalar,Dim,Mode> res = t;
    res.pretranslate(m_coeffs);
    return res;
  }

  /** Applies translation to vector */
  inline VectorType operator* (const VectorType& other) const
  { return m_coeffs + other; }

  /** \returns the inverse translation (opposite) */
  Translation inverse() const { return Translation(-m_coeffs); }

  Translation& operator=(const Translation& other)
  {
    m_coeffs = other.m_coeffs;
    return *this;
  }

  static const Translation Identity() { return Translation(VectorType::Zero()); }

  /** \returns \c *this with scalar type casted to \a NewScalarType
    *
    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
    * then this function smartly returns a const reference to \c *this. */
  template<typename NewScalarType>
  inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }

  /** Copy constructor with scalar type conversion */
  template<typename OtherScalarType>
  inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
  { m_coeffs = other.vector().template cast<Scalar>(); }

  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
    * determined by \a prec.
* * \sa MatrixBase::isApprox() */ bool isApprox(const Translation& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } }; /** \addtogroup Geometry_Module */ //@{ typedef Translation Translation2f; typedef Translation Translation2d; typedef Translation Translation3f; typedef Translation Translation3d; //@} template inline typename Translation::AffineTransformType Translation::operator* (const UniformScaling& other) const { AffineTransformType res; res.matrix().setZero(); res.linear().diagonal().fill(other.factor()); res.translation() = m_coeffs; res(Dim,Dim) = Scalar(1); return res; } template template inline typename Translation::AffineTransformType Translation::operator* (const EigenBase& linear) const { AffineTransformType res; res.matrix().setZero(); res.linear() = linear.derived(); res.translation() = m_coeffs; res.matrix().row(Dim).setZero(); res(Dim,Dim) = Scalar(1); return res; } } // end namespace Eigen #endif // EIGEN_TRANSLATION_H antimony/lib/fab/vendor/Eigen/src/Geometry/Quaternion.h0000644000175000017500000007105513341130426022413 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2009 Mathieu Gautier // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_QUATERNION_H #define EIGEN_QUATERNION_H namespace Eigen { /*************************************************************************** * Definition of QuaternionBase * The implementation is at the end of the file ***************************************************************************/ namespace internal { template struct quaternionbase_assign_impl; } /** \geometry_module \ingroup Geometry_Module * \class QuaternionBase * \brief Base class for quaternion expressions * \tparam Derived derived type (CRTP) * \sa class Quaternion */ template class QuaternionBase : public RotationBase { typedef RotationBase Base; public: using Base::operator*; using Base::derived; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::Coefficients Coefficients; enum { Flags = Eigen::internal::traits::Flags }; // typedef typename Matrix Coefficients; /** the type of a 3D vector */ typedef Matrix Vector3; /** the equivalent rotation matrix type */ typedef Matrix Matrix3; /** the equivalent angle-axis type */ typedef AngleAxis AngleAxisType; /** \returns the \c x coefficient */ inline Scalar x() const { return this->derived().coeffs().coeff(0); } /** \returns the \c y coefficient */ inline Scalar y() const { return this->derived().coeffs().coeff(1); } /** \returns the \c z coefficient */ inline Scalar z() const { return this->derived().coeffs().coeff(2); } /** \returns the \c w coefficient */ inline Scalar w() const { return this->derived().coeffs().coeff(3); } /** \returns a reference to the \c x coefficient */ inline Scalar& x() { return this->derived().coeffs().coeffRef(0); } /** \returns a reference to the \c y coefficient */ inline Scalar& y() { return this->derived().coeffs().coeffRef(1); } /** \returns a reference to the \c z coefficient */ inline Scalar& z() { return this->derived().coeffs().coeffRef(2); } /** \returns a reference to the \c w coefficient */ inline Scalar& w() { return 
this->derived().coeffs().coeffRef(3); } /** \returns a read-only vector expression of the imaginary part (x,y,z) */ inline const VectorBlock vec() const { return coeffs().template head<3>(); } /** \returns a vector expression of the imaginary part (x,y,z) */ inline VectorBlock vec() { return coeffs().template head<3>(); } /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ inline const typename internal::traits::Coefficients& coeffs() const { return derived().coeffs(); } /** \returns a vector expression of the coefficients (x,y,z,w) */ inline typename internal::traits::Coefficients& coeffs() { return derived().coeffs(); } EIGEN_STRONG_INLINE QuaternionBase& operator=(const QuaternionBase& other); template EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase& other); // disabled this copy operator as it is giving very strange compilation errors when compiling // test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's // useful; however notice that we already have the templated operator= above and e.g. in MatrixBase // we didn't have to add, in addition to templated operator=, such a non-templated copy operator. // Derived& operator=(const QuaternionBase& other) // { return operator=(other); } Derived& operator=(const AngleAxisType& aa); template Derived& operator=(const MatrixBase& m); /** \returns a quaternion representing an identity rotation * \sa MatrixBase::Identity() */ static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); } /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity() */ inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; } /** \returns the squared norm of the quaternion's coefficients * \sa QuaternionBase::norm(), MatrixBase::squaredNorm() */ inline Scalar squaredNorm() const { return coeffs().squaredNorm(); } /** \returns the norm of the quaternion's coefficients * \sa QuaternionBase::squaredNorm(), MatrixBase::norm() */ inline Scalar norm() const { return coeffs().norm(); } /** Normalizes the quaternion \c *this * \sa normalized(), MatrixBase::normalize() */ inline void normalize() { coeffs().normalize(); } /** \returns a normalized copy of \c *this * \sa normalize(), MatrixBase::normalized() */ inline Quaternion normalized() const { return Quaternion(coeffs().normalized()); } /** \returns the dot product of \c *this and \a other * Geometrically speaking, the dot product of two unit quaternions * corresponds to the cosine of half the angle between the two rotations. * \sa angularDistance() */ template inline Scalar dot(const QuaternionBase& other) const { return coeffs().dot(other.coeffs()); } template Scalar angularDistance(const QuaternionBase& other) const; /** \returns an equivalent 3x3 rotation matrix */ Matrix3 toRotationMatrix() const; /** \returns the quaternion which transform \a a into \a b through a rotation */ template Derived& setFromTwoVectors(const MatrixBase& a, const MatrixBase& b); template EIGEN_STRONG_INLINE Quaternion operator* (const QuaternionBase& q) const; template EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase& q); /** \returns the quaternion describing the inverse rotation */ Quaternion inverse() const; /** \returns the conjugated quaternion */ Quaternion conjugate() const; template Quaternion slerp(const Scalar& t, const QuaternionBase& other) const; /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
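 *
 * A sketch of the surrounding API (illustrative, not from the upstream docs;
 * angle and axis are arbitrary):
 * \code
 * Eigen::Quaternionf q(Eigen::AngleAxisf(0.785f, Eigen::Vector3f::UnitZ()));
 * q.normalize();                             // guard against numerical drift
 * Eigen::Matrix3f R = q.toRotationMatrix();  // equivalent 3x3 rotation
 * float c = q.dot(Eigen::Quaternionf::Identity());  // cosine of half the angle to identity
 * \endcode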
* * \sa MatrixBase::isApprox() */ template bool isApprox(const QuaternionBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const { return coeffs().isApprox(other.coeffs(), prec); } /** return the result vector of \a v through the rotation*/ EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(derived()); } #ifdef EIGEN_QUATERNIONBASE_PLUGIN # include EIGEN_QUATERNIONBASE_PLUGIN #endif }; /*************************************************************************** * Definition/implementation of Quaternion ***************************************************************************/ /** \geometry_module \ingroup Geometry_Module * * \class Quaternion * * \brief The quaternion class used to represent 3D orientations and rotations * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign. * * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of * orientations and rotations of objects in three dimensions. Compared to other representations * like Euler angles or 3x3 matrices, quaternions offer the following advantages: * \li \b compact storage (4 scalars) * \li \b efficient to compose (28 flops), * \li \b stable spherical interpolation * * The following two typedefs are provided for convenience: * \li \c Quaternionf for \c float * \li \c Quaterniond for \c double * * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized. * * \sa class AngleAxis, class Transform */ namespace internal { template struct traits > { typedef Quaternion<_Scalar,_Options> PlainObject; typedef _Scalar Scalar; typedef Matrix<_Scalar,4,1,_Options> Coefficients; enum{ IsAligned = internal::traits::Flags & AlignedBit, Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit }; }; } template class Quaternion : public QuaternionBase > { typedef QuaternionBase > Base; enum { IsAligned = internal::traits::IsAligned }; public: typedef _Scalar Scalar; EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion) using Base::operator*=; typedef typename internal::traits::Coefficients Coefficients; typedef typename Base::AngleAxisType AngleAxisType; /** Default constructor leaving the quaternion uninitialized. */ inline Quaternion() {} /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from * its four coefficients \a w, \a x, \a y and \a z. 
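 *
 * For example (illustrative sketch):
 * \code
 * Eigen::Quaternionf q(1.f, 0.f, 0.f, 0.f);  // w first: the identity rotation
 * // q.coeffs() is laid out as (x, y, z, w) = (0, 0, 0, 1)
 * \endcode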
* * \warning Note the order of the arguments: the real \a w coefficient first, * while internally the coefficients are stored in the following order: * [\c x, \c y, \c z, \c w] */ inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){} /** Constructs and initialize a quaternion from the array data */ inline Quaternion(const Scalar* data) : m_coeffs(data) {} /** Copy constructor */ template EIGEN_STRONG_INLINE Quaternion(const QuaternionBase& other) { this->Base::operator=(other); } /** Constructs and initializes a quaternion from the angle-axis \a aa */ explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; } /** Constructs and initializes a quaternion from either: * - a rotation matrix expression, * - a 4D vector expression representing quaternion coefficients. */ template explicit inline Quaternion(const MatrixBase& other) { *this = other; } /** Explicit copy constructor with scalar conversion */ template explicit inline Quaternion(const Quaternion& other) { m_coeffs = other.coeffs().template cast(); } template static Quaternion FromTwoVectors(const MatrixBase& a, const MatrixBase& b); inline Coefficients& coeffs() { return m_coeffs;} inline const Coefficients& coeffs() const { return m_coeffs;} EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned) protected: Coefficients m_coeffs; #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif }; /** \ingroup Geometry_Module * single precision quaternion type */ typedef Quaternion Quaternionf; /** \ingroup Geometry_Module * double precision quaternion type */ typedef Quaternion Quaterniond; /*************************************************************************** * Specialization of Map> ***************************************************************************/ namespace internal { template struct traits, _Options> > : traits > { typedef Map, _Options> Coefficients; }; } namespace internal { template struct traits, _Options> > : traits > { typedef Map, _Options> Coefficients; typedef traits > TraitsBase; enum { Flags = TraitsBase::Flags & ~LvalueBit }; }; } /** \ingroup Geometry_Module * \brief Quaternion expression mapping a constant memory buffer * * \tparam _Scalar the type of the Quaternion coefficients * \tparam _Options see class Map * * This is a specialization of class Map for Quaternion. This class allows to view * a 4 scalar memory buffer as an Eigen's Quaternion object. * * \sa class Map, class Quaternion, class QuaternionBase */ template class Map, _Options > : public QuaternionBase, _Options> > { typedef QuaternionBase, _Options> > Base; public: typedef _Scalar Scalar; typedef typename internal::traits::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. 
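 *
 * A sketch of mapping an existing buffer (illustrative; the array contents
 * are arbitrary and must be laid out as x,y,z,w):
 * \code
 * const float buf[4] = {0.f, 0.f, 0.f, 1.f};     // identity rotation
 * Eigen::Map<const Eigen::Quaternionf> q(buf);   // read-only view, no copy
 * Eigen::Vector3f v = q * Eigen::Vector3f::UnitX();
 * \endcode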
*/ EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {} inline const Coefficients& coeffs() const { return m_coeffs;} protected: const Coefficients m_coeffs; }; /** \ingroup Geometry_Module * \brief Expression of a quaternion from a memory buffer * * \tparam _Scalar the type of the Quaternion coefficients * \tparam _Options see class Map * * This is a specialization of class Map for Quaternion. This class allows to view * a 4 scalar memory buffer as an Eigen's Quaternion object. * * \sa class Map, class Quaternion, class QuaternionBase */ template class Map, _Options > : public QuaternionBase, _Options> > { typedef QuaternionBase, _Options> > Base; public: typedef _Scalar Scalar; typedef typename internal::traits::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {} inline Coefficients& coeffs() { return m_coeffs; } inline const Coefficients& coeffs() const { return m_coeffs; } protected: Coefficients m_coeffs; }; /** \ingroup Geometry_Module * Map an unaligned array of single precision scalars as a quaternion */ typedef Map, 0> QuaternionMapf; /** \ingroup Geometry_Module * Map an unaligned array of double precision scalars as a quaternion */ typedef Map, 0> QuaternionMapd; /** \ingroup Geometry_Module * Map a 16-byte aligned array of single precision scalars as a quaternion */ typedef Map, Aligned> QuaternionMapAlignedf; /** \ingroup Geometry_Module * Map a 16-byte aligned array of double precision scalars as a quaternion */ typedef Map, Aligned> QuaternionMapAlignedd; /*************************************************************************** * Implementation of QuaternionBase methods ***************************************************************************/ // Generic Quaternion * Quaternion product // This product can be specialized for a given architecture via the Arch template argument. namespace internal { template struct quat_product { static EIGEN_STRONG_INLINE Quaternion run(const QuaternionBase& a, const QuaternionBase& b){ return Quaternion ( a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x() ); } }; } /** \returns the concatenation of two rotations as a quaternion-quaternion product */ template template EIGEN_STRONG_INLINE Quaternion::Scalar> QuaternionBase::operator* (const QuaternionBase& other) const { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) return internal::quat_product::Scalar, internal::traits::IsAligned && internal::traits::IsAligned>::run(*this, other); } /** \sa operator*(Quaternion) */ template template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator*= (const QuaternionBase& other) { derived() = derived() * other.derived(); return derived(); } /** Rotation of a vector by a quaternion. 
* \remarks If the quaternion is used to rotate several points (>1) * then it is much more efficient to first convert it to a 3x3 Matrix. * Comparison of the operation cost for n transformations: * - Quaternion2: 30n * - Via a Matrix3: 24 + 15n */ template EIGEN_STRONG_INLINE typename QuaternionBase::Vector3 QuaternionBase::_transformVector(Vector3 v) const { // Note that this algorithm comes from the optimization by hand // of the conversion to a Matrix followed by a Matrix/Vector product. // It appears to be much faster than the common algorithm found // in the literature (30 versus 39 flops). It also requires two // Vector3 as temporaries. Vector3 uv = this->vec().cross(v); uv += uv; return v + this->w() * uv + this->vec().cross(uv); } template EIGEN_STRONG_INLINE QuaternionBase& QuaternionBase::operator=(const QuaternionBase& other) { coeffs() = other.coeffs(); return derived(); } template template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const QuaternionBase& other) { coeffs() = other.coeffs(); return derived(); } /** Set \c *this from an angle-axis \a aa and returns a reference to \c *this */ template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const AngleAxisType& aa) { using std::cos; using std::sin; Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings this->w() = cos(ha); this->vec() = sin(ha) * aa.axis(); return derived(); } /** Set \c *this from the expression \a xpr: * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix * and \a xpr is converted to a quaternion */ template template inline Derived& QuaternionBase::operator=(const MatrixBase& xpr) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) internal::quaternionbase_assign_impl::run(*this, xpr.derived()); return derived(); } /** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to * be normalized, otherwise the result is undefined. */ template inline typename QuaternionBase::Matrix3 QuaternionBase::toRotationMatrix(void) const { // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!) // if not inlined then the cost of the return by value is huge ~ +35%, // however, not inlining this function is an order of magnitude slower, so // it has to be inlined, and so the return by value is not an issue Matrix3 res; const Scalar tx = Scalar(2)*this->x(); const Scalar ty = Scalar(2)*this->y(); const Scalar tz = Scalar(2)*this->z(); const Scalar twx = tx*this->w(); const Scalar twy = ty*this->w(); const Scalar twz = tz*this->w(); const Scalar txx = tx*this->x(); const Scalar txy = ty*this->x(); const Scalar txz = tz*this->x(); const Scalar tyy = ty*this->y(); const Scalar tyz = tz*this->y(); const Scalar tzz = tz*this->z(); res.coeffRef(0,0) = Scalar(1)-(tyy+tzz); res.coeffRef(0,1) = txy-twz; res.coeffRef(0,2) = txz+twy; res.coeffRef(1,0) = txy+twz; res.coeffRef(1,1) = Scalar(1)-(txx+tzz); res.coeffRef(1,2) = tyz-twx; res.coeffRef(2,0) = txz-twy; res.coeffRef(2,1) = tyz+twx; res.coeffRef(2,2) = Scalar(1)-(txx+tyy); return res; } /** Sets \c *this to be a quaternion representing a rotation between * the two arbitrary vectors \a a and \a b. In other words, the built * rotation represent a rotation sending the line of direction \a a * to the line of direction \a b, both lines passing through the origin. 
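 *
 * For instance (illustrative sketch; the vectors are arbitrary and
 * deliberately not normalized):
 * \code
 * Eigen::Vector3f a(0.f, 0.f, 2.f), b(3.f, 0.f, 0.f);
 * Eigen::Quaternionf q;
 * q.setFromTwoVectors(a, b);   // (q * a).normalized() is approximately b.normalized()
 * \endcode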
* * \returns a reference to \c *this. * * Note that the two input vectors do \b not have to be normalized, and * do not need to have the same norm. */ template template inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase& a, const MatrixBase& b) { using std::max; using std::sqrt; Vector3 v0 = a.normalized(); Vector3 v1 = b.normalized(); Scalar c = v1.dot(v0); // if dot == -1, vectors are nearly opposites // => accurately compute the rotation axis by computing the // intersection of the two planes. This is done by solving: // x^T v0 = 0 // x^T v1 = 0 // under the constraint: // ||x|| = 1 // which yields a singular value problem if (c < Scalar(-1)+NumTraits::dummy_precision()) { c = (max)(c,Scalar(-1)); Matrix m; m << v0.transpose(), v1.transpose(); JacobiSVD > svd(m, ComputeFullV); Vector3 axis = svd.matrixV().col(2); Scalar w2 = (Scalar(1)+c)*Scalar(0.5); this->w() = sqrt(w2); this->vec() = axis * sqrt(Scalar(1) - w2); return derived(); } Vector3 axis = v0.cross(v1); Scalar s = sqrt((Scalar(1)+c)*Scalar(2)); Scalar invs = Scalar(1)/s; this->vec() = axis * invs; this->w() = s * Scalar(0.5); return derived(); } /** Returns a quaternion representing a rotation between * the two arbitrary vectors \a a and \a b. In other words, the built * rotation represent a rotation sending the line of direction \a a * to the line of direction \a b, both lines passing through the origin. * * \returns resulting quaternion * * Note that the two input vectors do \b not have to be normalized, and * do not need to have the same norm. */ template template Quaternion Quaternion::FromTwoVectors(const MatrixBase& a, const MatrixBase& b) { Quaternion quat; quat.setFromTwoVectors(a, b); return quat; } /** \returns the multiplicative inverse of \c *this * Note that in most cases, i.e., if you simply want the opposite rotation, * and/or the quaternion is normalized, then it is enough to use the conjugate. * * \sa QuaternionBase::conjugate() */ template inline Quaternion::Scalar> QuaternionBase::inverse() const { // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? Scalar n2 = this->squaredNorm(); if (n2 > 0) return Quaternion(conjugate().coeffs() / n2); else { // return an invalid result to flag the error return Quaternion(Coefficients::Zero()); } } /** \returns the conjugate of the \c *this which is equal to the multiplicative inverse * if the quaternion is normalized. * The conjugate of a quaternion represents the opposite rotation. * * \sa Quaternion2::inverse() */ template inline Quaternion::Scalar> QuaternionBase::conjugate() const { return Quaternion(this->w(),-this->x(),-this->y(),-this->z()); } /** \returns the angle (in radian) between two rotations * \sa dot() */ template template inline typename internal::traits::Scalar QuaternionBase::angularDistance(const QuaternionBase& other) const { using std::acos; using std::abs; Scalar d = abs(this->dot(other)); if (d>=Scalar(1)) return Scalar(0); return Scalar(2) * acos(d); } /** \returns the spherical linear interpolation between the two quaternions * \c *this and \a other at the parameter \a t in [0;1]. * * This represents an interpolation for a constant motion between \c *this and \a other, * see also http://en.wikipedia.org/wiki/Slerp. 
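 *
 * A short sketch (illustrative; endpoints and parameter are arbitrary):
 * \code
 * Eigen::Quaternionf qa = Eigen::Quaternionf::Identity();
 * Eigen::Quaternionf qb(Eigen::AngleAxisf(1.2f, Eigen::Vector3f::UnitY()));
 * Eigen::Quaternionf qm = qa.slerp(0.5f, qb);  // rotation halfway between qa and qb
 * \endcode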
*/ template template Quaternion::Scalar> QuaternionBase::slerp(const Scalar& t, const QuaternionBase& other) const { using std::acos; using std::sin; using std::abs; static const Scalar one = Scalar(1) - NumTraits::epsilon(); Scalar d = this->dot(other); Scalar absD = abs(d); Scalar scale0; Scalar scale1; if(absD>=one) { scale0 = Scalar(1) - t; scale1 = t; } else { // theta is the angle between the 2 quaternions Scalar theta = acos(absD); Scalar sinTheta = sin(theta); scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta; scale1 = sin( ( t * theta) ) / sinTheta; } if(d<0) scale1 = -scale1; return Quaternion(scale0 * coeffs() + scale1 * other.coeffs()); } namespace internal { // set from a rotation matrix template struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; typedef DenseIndex Index; template static inline void run(QuaternionBase& q, const Other& mat) { using std::sqrt; // This algorithm comes from "Quaternion Calculus and Fast Animation", // Ken Shoemake, 1987 SIGGRAPH course notes Scalar t = mat.trace(); if (t > Scalar(0)) { t = sqrt(t + Scalar(1.0)); q.w() = Scalar(0.5)*t; t = Scalar(0.5)/t; q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t; q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t; q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t; } else { DenseIndex i = 0; if (mat.coeff(1,1) > mat.coeff(0,0)) i = 1; if (mat.coeff(2,2) > mat.coeff(i,i)) i = 2; DenseIndex j = (i+1)%3; DenseIndex k = (j+1)%3; t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); q.coeffs().coeffRef(i) = Scalar(0.5) * t; t = Scalar(0.5)/t; q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t; q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t; q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t; } } }; // set from a vector of coefficients assumed to be a quaternion template struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; template static inline void run(QuaternionBase& q, const Other& vec) { q.coeffs() = vec; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_QUATERNION_H antimony/lib/fab/vendor/Eigen/src/Geometry/Scaling.h0000644000175000017500000001422213341130426021637 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SCALING_H #define EIGEN_SCALING_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Scaling * * \brief Represents a generic uniform scaling transformation * * \param _Scalar the scalar type, i.e., the type of the coefficients. * * This class represent a uniform scaling transformation. It is the return * type of Scaling(Scalar), and most of the time this is the only way it * is used. In particular, this class is not aimed to be used to store a scaling transformation, * but rather to make easier the constructions and updates of Transform objects. * * To represent an axis aligned scaling, use the DiagonalMatrix class. * * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform */ template class UniformScaling { public: /** the scalar type of the coefficients */ typedef _Scalar Scalar; protected: Scalar m_factor; public: /** Default constructor without initialization. 
*/ UniformScaling() {} /** Constructs and initialize a uniform scaling transformation */ explicit inline UniformScaling(const Scalar& s) : m_factor(s) {} inline const Scalar& factor() const { return m_factor; } inline Scalar& factor() { return m_factor; } /** Concatenates two uniform scaling */ inline UniformScaling operator* (const UniformScaling& other) const { return UniformScaling(m_factor * other.factor()); } /** Concatenates a uniform scaling and a translation */ template inline Transform operator* (const Translation& t) const; /** Concatenates a uniform scaling and an affine transformation */ template inline Transform operator* (const Transform& t) const { Transform res = t; res.prescale(factor()); return res; } /** Concatenates a uniform scaling and a linear transformation matrix */ // TODO returns an expression template inline typename internal::plain_matrix_type::type operator* (const MatrixBase& other) const { return other * m_factor; } template inline Matrix operator*(const RotationBase& r) const { return r.toRotationMatrix() * m_factor; } /** \returns the inverse scaling */ inline UniformScaling inverse() const { return UniformScaling(Scalar(1)/m_factor); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline UniformScaling cast() const { return UniformScaling(NewScalarType(m_factor)); } /** Copy constructor with scalar type conversion */ template inline explicit UniformScaling(const UniformScaling& other) { m_factor = Scalar(other.factor()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ bool isApprox(const UniformScaling& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return internal::isApprox(m_factor, other.factor(), prec); } }; /** Concatenates a linear transformation matrix and a uniform scaling */ // NOTE this operator is defiend in MatrixBase and not as a friend function // of UniformScaling to fix an internal crash of Intel's ICC template typename MatrixBase::ScalarMultipleReturnType MatrixBase::operator*(const UniformScaling& s) const { return derived() * s.factor(); } /** Constructs a uniform scaling from scale factor \a s */ static inline UniformScaling Scaling(float s) { return UniformScaling(s); } /** Constructs a uniform scaling from scale factor \a s */ static inline UniformScaling Scaling(double s) { return UniformScaling(s); } /** Constructs a uniform scaling from scale factor \a s */ template static inline UniformScaling > Scaling(const std::complex& s) { return UniformScaling >(s); } /** Constructs a 2D axis aligned scaling */ template static inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy) { return DiagonalMatrix(sx, sy); } /** Constructs a 3D axis aligned scaling */ template static inline DiagonalMatrix Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) { return DiagonalMatrix(sx, sy, sz); } /** Constructs an axis aligned scaling expression from vector expression \a coeffs * This is an alias for coeffs.asDiagonal() */ template static inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) { return coeffs.asDiagonal(); } /** \addtogroup Geometry_Module */ //@{ /** \deprecated */ typedef DiagonalMatrix AlignedScaling2f; /** \deprecated */ typedef DiagonalMatrix AlignedScaling2d; /** \deprecated */ typedef DiagonalMatrix AlignedScaling3f; /** \deprecated */ typedef DiagonalMatrix AlignedScaling3d; //@} template template inline Transform UniformScaling::operator* (const Translation& t) const { Transform res; res.matrix().setZero(); res.linear().diagonal().fill(factor()); res.translation() = factor() * t.vector(); res(Dim,Dim) = Scalar(1); return res; } } // end namespace Eigen #endif // EIGEN_SCALING_H antimony/lib/fab/vendor/Eigen/src/Geometry/RotationBase.h0000644000175000017500000001702313341130426022653 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ROTATIONBASE_H #define EIGEN_ROTATIONBASE_H namespace Eigen { // forward declaration namespace internal { template struct rotation_base_generic_product_selector; } /** \class RotationBase * * \brief Common base class for compact rotation representations * * \param Derived is the derived type, i.e., a rotation type * \param _Dim the dimension of the space */ template class RotationBase { public: enum { Dim = _Dim }; /** the scalar type of the coefficients */ typedef typename internal::traits::Scalar Scalar; /** corresponding linear transformation matrix type */ typedef Matrix RotationMatrixType; typedef Matrix VectorType; public: inline const Derived& derived() const { return *static_cast(this); } inline Derived& derived() { return *static_cast(this); } /** \returns an equivalent rotation matrix */ inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); } /** \returns an equivalent rotation matrix * This function is added to be conform with the Transform class' naming scheme. */ inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); } /** \returns the inverse rotation */ inline Derived inverse() const { return derived().inverse(); } /** \returns the concatenation of the rotation \c *this with a translation \a t */ inline Transform operator*(const Translation& t) const { return Transform(*this) * t; } /** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */ inline RotationMatrixType operator*(const UniformScaling& s) const { return toRotationMatrix() * s.factor(); } /** \returns the concatenation of the rotation \c *this with a generic expression \a e * \a e can be: * - a DimxDim linear transformation matrix * - a DimxDim diagonal matrix (axis aligned scaling) * - a vector of size Dim */ template EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector::ReturnType operator*(const EigenBase& e) const { return internal::rotation_base_generic_product_selector::run(derived(), e.derived()); } /** \returns the concatenation of a linear transformation \a l with the rotation \a r */ template friend inline RotationMatrixType operator*(const EigenBase& l, const Derived& r) { return l.derived() * r.toRotationMatrix(); } /** \returns the concatenation of a scaling \a l with the rotation \a r */ friend inline Transform operator*(const DiagonalMatrix& l, const Derived& r) { Transform res(r); res.linear().applyOnTheLeft(l); return res; } /** \returns the concatenation of the rotation \c *this with a transformation \a t */ template inline Transform operator*(const Transform& t) const { return toRotationMatrix() * t; } template inline VectorType _transformVector(const OtherVectorType& v) const { return toRotationMatrix() * v; } }; namespace internal { // implementation of the generic product rotation * matrix template struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; static inline ReturnType run(const RotationDerived& r, const MatrixType& m) { return r.toRotationMatrix() * m; } }; template struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix, false > { typedef Transform ReturnType; static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix& m) { ReturnType res(r); res.linear() *= m; return res; } }; template struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& 
r, const OtherVectorType& v) { return r._transformVector(v); } }; } // end namespace internal /** \geometry_module * * \brief Constructs a Dim x Dim rotation matrix from the rotation \a r */ template template Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> ::Matrix(const RotationBase& r) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) *this = r.toRotationMatrix(); } /** \geometry_module * * \brief Set a Dim x Dim rotation matrix from the rotation \a r */ template template Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>& Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> ::operator=(const RotationBase& r) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) return *this = r.toRotationMatrix(); } namespace internal { /** \internal * * Helper function to return an arbitrary rotation object to a rotation matrix. * * \param Scalar the numeric type of the matrix coefficients * \param Dim the dimension of the current space * * It returns a Dim x Dim fixed size matrix. * * Default specializations are provided for: * - any scalar type (2D), * - any matrix expression, * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) * * Currently toRotationMatrix is only used by Transform. * * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis */ template static inline Matrix toRotationMatrix(const Scalar& s) { EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) return Rotation2D(s).toRotationMatrix(); } template static inline Matrix toRotationMatrix(const RotationBase& r) { return r.toRotationMatrix(); } template static inline const MatrixBase& toRotationMatrix(const MatrixBase& mat) { EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, YOU_MADE_A_PROGRAMMING_MISTAKE) return mat; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_ROTATIONBASE_H antimony/lib/fab/vendor/Eigen/src/Geometry/Hyperplane.h0000644000175000017500000002623513341130426022375 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HYPERPLANE_H #define EIGEN_HYPERPLANE_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Hyperplane * * \brief A hyperplane * * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n. * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane. * * \param _Scalar the scalar type, i.e., the type of the coefficients * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * Notice that the dimension of the hyperplane is _AmbientDim-1. * * This class represents an hyperplane as the zero set of the implicit equation * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part) * and \f$ d \f$ is the distance (offset) to the origin. */ template class Hyperplane { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? 
Dynamic : _AmbientDim+1) enum { AmbientDimAtCompileTime = _AmbientDim, Options = _Options }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef DenseIndex Index; typedef Matrix VectorType; typedef Matrix Coefficients; typedef Block NormalReturnType; typedef const Block ConstNormalReturnType; /** Default constructor without initialization */ inline Hyperplane() {} template Hyperplane(const Hyperplane& other) : m_coeffs(other.coeffs()) {} /** Constructs a dynamic-size hyperplane with \a _dim the dimension * of the ambient space */ inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {} /** Construct a plane from its normal \a n and a point \a e onto the plane. * \warning the vector normal is assumed to be normalized. */ inline Hyperplane(const VectorType& n, const VectorType& e) : m_coeffs(n.size()+1) { normal() = n; offset() = -n.dot(e); } /** Constructs a plane from its normal \a n and distance to the origin \a d * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$. * \warning the vector normal is assumed to be normalized. */ inline Hyperplane(const VectorType& n, const Scalar& d) : m_coeffs(n.size()+1) { normal() = n; offset() = d; } /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made. */ static inline Hyperplane Through(const VectorType& p0, const VectorType& p1) { Hyperplane result(p0.size()); result.normal() = (p1 - p0).unitOrthogonal(); result.offset() = -p0.dot(result.normal()); return result; } /** Constructs a hyperplane passing through the three points. The dimension of the ambient space * is required to be exactly 3. */ static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3) Hyperplane result(p0.size()); VectorType v0(p2 - p0), v1(p1 - p0); result.normal() = v0.cross(v1); RealScalar norm = result.normal().norm(); if(norm <= v0.norm() * v1.norm() * NumTraits::epsilon()) { Matrix m; m << v0.transpose(), v1.transpose(); JacobiSVD > svd(m, ComputeFullV); result.normal() = svd.matrixV().col(2); } else result.normal() /= norm; result.offset() = -p0.dot(result.normal()); return result; } /** Constructs a hyperplane passing through the parametrized line \a parametrized. * If the dimension of the ambient space is greater than 2, then there isn't uniqueness, * so an arbitrary choice is made. */ // FIXME to be consitent with the rest this could be implemented as a static Through function ?? explicit Hyperplane(const ParametrizedLine& parametrized) { normal() = parametrized.direction().unitOrthogonal(); offset() = -parametrized.origin().dot(normal()); } ~Hyperplane() {} /** \returns the dimension in which the plane holds */ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); } /** normalizes \c *this */ void normalize(void) { m_coeffs /= normal().norm(); } /** \returns the signed distance between the plane \c *this and a point \a p. * \sa absDistance() */ inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); } /** \returns the absolute distance between the plane \c *this and a point \a p. * \sa signedDistance() */ inline Scalar absDistance(const VectorType& p) const { using std::abs; return abs(signedDistance(p)); } /** \returns the projection of a point \a p onto the plane \c *this. 
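 *
 * A short sketch (illustrative; plane and point are arbitrary):
 * \code
 * Eigen::Hyperplane<float,3> h(Eigen::Vector3f::UnitZ(), 0.f);        // the plane z = 0
 * Eigen::Vector3f q = h.projection(Eigen::Vector3f(1.f, 2.f, 5.f));   // yields (1, 2, 0)
 * \endcode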
*/ inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); } /** \returns a constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns the distance to the origin, which is also the "constant term" of the implicit equation * \warning the vector normal is assumed to be normalized. */ inline const Scalar& offset() const { return m_coeffs.coeff(dim()); } /** \returns a non-constant reference to the distance to the origin, which is also the constant part * of the implicit equation */ inline Scalar& offset() { return m_coeffs(dim()); } /** \returns a constant reference to the coefficients c_i of the plane equation: * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ */ inline const Coefficients& coeffs() const { return m_coeffs; } /** \returns a non-constant reference to the coefficients c_i of the plane equation: * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ */ inline Coefficients& coeffs() { return m_coeffs; } /** \returns the intersection of *this with \a other. * * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines. * * \note If \a other is approximately parallel to *this, this method will return any point on *this. */ VectorType intersection(const Hyperplane& other) const { using std::abs; EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests // whether the two lines are approximately parallel. if(internal::isMuchSmallerThan(det, Scalar(1))) { // special case where the two lines are approximately parallel. Pick any point on the first line. if(abs(coeffs().coeff(1))>abs(coeffs().coeff(0))) return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); else return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); } else { // general case Scalar invdet = Scalar(1) / det; return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)), invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2))); } } /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this. * * \param mat the Dim x Dim transformation matrix * \param traits specifies whether the matrix \a mat represents an #Isometry * or a more generic #Affine transformation. The default is #Affine. */ template inline Hyperplane& transform(const MatrixBase& mat, TransformTraits traits = Affine) { if (traits==Affine) normal() = mat.inverse().transpose() * normal(); else if (traits==Isometry) normal() = mat * normal(); else { eigen_assert(0 && "invalid traits value in Hyperplane::transform()"); } return *this; } /** Applies the transformation \a t to \c *this and returns a reference to \c *this. * * \param t the transformation of dimension Dim * \param traits specifies whether the transformation \a t represents an #Isometry * or a more generic #Affine transformation. 
The default is #Affine. * Other kind of transformations are not supported. */ template inline Hyperplane& transform(const Transform& t, TransformTraits traits = Affine) { transform(t.linear(), traits); offset() -= normal().dot(t.translation()); return *this; } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Hyperplane(const Hyperplane& other) { m_coeffs = other.coeffs().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ template bool isApprox(const Hyperplane& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } protected: Coefficients m_coeffs; }; } // end namespace Eigen #endif // EIGEN_HYPERPLANE_H antimony/lib/fab/vendor/Eigen/src/Geometry/Umeyama.h0000644000175000017500000001512213341130426021655 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_UMEYAMA_H #define EIGEN_UMEYAMA_H // This file requires the user to include // * Eigen/Core // * Eigen/LU // * Eigen/SVD // * Eigen/Array namespace Eigen { #ifndef EIGEN_PARSED_BY_DOXYGEN // These helpers are required since it allows to use mixed types as parameters // for the Umeyama. The problem with mixed parameters is that the return type // cannot trivially be deduced when float and double types are mixed. namespace internal { // Compile time return type deduction for different MatrixBase types. // Different means here different alignment and parameters but the same underlying // real scalar type. template struct umeyama_transform_matrix_type { enum { MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime), // When possible we want to choose some small fixed size value since the result // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want. HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1 }; typedef Matrix::Scalar, HomogeneousDimension, HomogeneousDimension, AutoAlign | (traits::Flags & RowMajorBit ? RowMajor : ColMajor), HomogeneousDimension, HomogeneousDimension > type; }; } #endif /** * \geometry_module \ingroup Geometry_Module * * \brief Returns the transformation between two point sets. * * The algorithm is based on: * "Least-squares estimation of transformation parameters between two point patterns", * Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 * * It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that * \f{align*} * \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2 * \f} * is minimized. 
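 *
 * A typical call looks as follows (illustrative sketch; the point sets and
 * the transform are arbitrary, with points stored one per column):
 * \code
 * Eigen::Matrix3Xd src = Eigen::Matrix3Xd::Random(3, 20);
 * Eigen::Affine3d rigid(Eigen::Translation3d(1., 2., 3.)
 *                       * Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitZ()));
 * Eigen::Matrix3Xd dst = rigid * src;
 * Eigen::Matrix4d T = umeyama(src, dst, false);  // recovers rigid.matrix()
 * \endcode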
 *
 * The algorithm is based on the analysis of the covariance matrix
 * \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
 * of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where
 * \f$d\f$ corresponds to the dimension (which is typically small).
 * The analysis involves the SVD, which has a complexity of \f$O(d^3)\f$,
 * though the actual computational effort lies in the covariance
 * matrix computation which has an asymptotic lower bound of \f$O(dm)\f$ when
 * the input point sets have dimension \f$d \times m\f$.
 *
 * Currently the method works only for floating point matrices.
 *
 * \todo Should the return type of umeyama() become a Transform?
 *
 * \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$.
 * \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$.
 * \param with_scaling Sets \f$ c=1 \f$ when false is passed.
 * \return The homogeneous transformation
 * \f{align*}
 *   T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
 * \f}
 * minimizing the residual above. This transformation is always returned as an
 * Eigen::Matrix.
 */
template <typename Derived, typename OtherDerived>
typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
{
  typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
  typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  typedef typename Derived::Index Index;

  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };

  typedef Matrix<Scalar, Dimension, 1> VectorType;
  typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
  typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;

  const Index m = src.rows(); // dimension
  const Index n = src.cols(); // number of measurements

  // required for demeaning ...
  const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);

  // computation of mean
  const VectorType src_mean = src.rowwise().sum() * one_over_n;
  const VectorType dst_mean = dst.rowwise().sum() * one_over_n;

  // demeaning of src and dst points
  const RowMajorMatrixType src_demean = src.colwise() - src_mean;
  const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;

  // Eq. (36)-(37)
  const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;

  // Eq. (38)
  const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();

  JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);

  // Initialize the resulting transformation with an identity matrix...
  TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);

  // Eq. (39)
  VectorType S = VectorType::Ones(m);
  if (sigma.determinant() < Scalar(0)) S(m-1) = Scalar(-1);

  // Eq. (40) and (43)
  const VectorType& d = svd.singularValues();
  Index rank = 0;
  for (Index i=0; i<m; ++i)
    if (!internal::isMuchSmallerThan(d.coeff(i), d.coeff(0))) ++rank;
  if (rank == m-1)
  {
    if (svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0))
    {
      Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
    }
    else
    {
      const Scalar s = S(m-1);
      S(m-1) = Scalar(-1);
      Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
      S(m-1) = s;
    }
  }
  else
  {
    Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
  }

  if (with_scaling)
  {
    // Eq. (42)
    const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);

    // Eq.
(41) Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean; Rt.block(0,0,m,m) *= c; } else { Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean; } return Rt; } } // end namespace Eigen #endif // EIGEN_UMEYAMA_H antimony/lib/fab/vendor/Eigen/src/Geometry/arch/0000755000175000017500000000000013341130426021022 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/Geometry/arch/CMakeLists.txt0000644000175000017500000000025013341130426023557 0ustar tiagotiagoFILE(GLOB Eigen_Geometry_arch_SRCS "*.h") INSTALL(FILES ${Eigen_Geometry_arch_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Geometry/arch COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/Geometry/arch/Geometry_SSE.h0000644000175000017500000000745013341130426023506 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Rohit Garg // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GEOMETRY_SSE_H #define EIGEN_GEOMETRY_SSE_H namespace Eigen { namespace internal { template struct quat_product { static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000)); Quaternion res; __m128 a = _a.coeffs().template packet(0); __m128 b = _b.coeffs().template packet(0); __m128 flip1 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,1,2,0,2), vec4f_swizzle1(b,2,0,1,2)),mask); __m128 flip2 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,3,3,3,1), vec4f_swizzle1(b,0,1,2,1)),mask); pstore(&res.x(), _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)), _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0), vec4f_swizzle1(b,1,2,0,0))), _mm_add_ps(flip1,flip2))); return res; } }; template struct cross3_impl { static inline typename plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { __m128 a = lhs.template packet(0); __m128 b = rhs.template packet(0); __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); typename plain_matrix_type::type res; pstore(&res.x(),_mm_sub_ps(mul1,mul2)); return res; } }; template struct quat_product { static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); Quaternion res; const double* a = _a.coeffs().data(); Packet2d b_xy = _b.coeffs().template packet(0); Packet2d b_zw = _b.coeffs().template packet(2); Packet2d a_xx = pset1(a[0]); Packet2d a_yy = pset1(a[1]); Packet2d a_zz = pset1(a[2]); Packet2d a_ww = pset1(a[3]); // two temporaries: Packet2d t1, t2; /* * t1 = ww*xy + yy*zw * t2 = zz*xy - xx*zw * res.xy = t1 +/- swap(t2) */ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2))); #else pstore(&res.x(), padd(t1, pxor(mask,preverse(t2)))); #endif /* * t1 = ww*zw - yy*xy * t2 = zz*zw + xx*xy * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2) */ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstore(&res.z(), 
preverse(_mm_addsub_pd(preverse(t1), t2))); #else pstore(&res.z(), psub(t1, pxor(mask,preverse(t2)))); #endif return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GEOMETRY_SSE_H antimony/lib/fab/vendor/Eigen/src/Geometry/AlignedBox.h0000644000175000017500000003100213341130426022266 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALIGNEDBOX_H #define EIGEN_ALIGNEDBOX_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \class AlignedBox * * \brief An axis aligned box * * \param _Scalar the type of the scalar coefficients * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * * This class represents an axis aligned box as a pair of the minimal and maximal corners. */ template class AlignedBox { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef NumTraits ScalarTraits; typedef DenseIndex Index; typedef typename ScalarTraits::Real RealScalar; typedef typename ScalarTraits::NonInteger NonInteger; typedef Matrix VectorType; /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */ enum CornerType { /** 1D names */ Min=0, Max=1, /** Added names for 2D */ BottomLeft=0, BottomRight=1, TopLeft=2, TopRight=3, /** Added names for 3D */ BottomLeftFloor=0, BottomRightFloor=1, TopLeftFloor=2, TopRightFloor=3, BottomLeftCeil=4, BottomRightCeil=5, TopLeftCeil=6, TopRightCeil=7 }; /** Default constructor initializing a null box. */ inline AlignedBox() { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); } /** Constructs a null box with \a _dim the dimension of the ambient space. */ inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim) { setEmpty(); } /** Constructs a box with extremities \a _min and \a _max. */ template inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {} /** Constructs a box containing a single point \a p. */ template inline explicit AlignedBox(const MatrixBase& a_p) { typename internal::nested::type p(a_p.derived()); m_min = p; m_max = p; } ~AlignedBox() {} /** \returns the dimension in which the box holds */ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); } /** \deprecated use isEmpty */ inline bool isNull() const { return isEmpty(); } /** \deprecated use setEmpty */ inline void setNull() { setEmpty(); } /** \returns true if the box is empty. */ inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); } /** Makes \c *this an empty box. 
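 *
 * A sketch of the usual build-up pattern (illustrative; points are arbitrary):
 * \code
 * Eigen::AlignedBox3f box;                      // default-constructed: empty
 * box.extend(Eigen::Vector3f(0.f, 0.f, 0.f));
 * box.extend(Eigen::Vector3f(1.f, 2.f, 3.f));
 * Eigen::Vector3f c = box.center();             // (0.5, 1, 1.5)
 * \endcode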
*/ inline void setEmpty() { m_min.setConstant( ScalarTraits::highest() ); m_max.setConstant( ScalarTraits::lowest() ); } /** \returns the minimal corner */ inline const VectorType& (min)() const { return m_min; } /** \returns a non const reference to the minimal corner */ inline VectorType& (min)() { return m_min; } /** \returns the maximal corner */ inline const VectorType& (max)() const { return m_max; } /** \returns a non const reference to the maximal corner */ inline VectorType& (max)() { return m_max; } /** \returns the center of the box */ inline const CwiseUnaryOp, const CwiseBinaryOp, const VectorType, const VectorType> > center() const { return (m_min+m_max)/2; } /** \returns the lengths of the sides of the bounding box. * Note that this function does not get the same * result for integral or floating scalar types: see */ inline const CwiseBinaryOp< internal::scalar_difference_op, const VectorType, const VectorType> sizes() const { return m_max - m_min; } /** \returns the volume of the bounding box */ inline Scalar volume() const { return sizes().prod(); } /** \returns an expression for the bounding box diagonal vector * if the length of the diagonal is needed: diagonal().norm() * will provide it. */ inline CwiseBinaryOp< internal::scalar_difference_op, const VectorType, const VectorType> diagonal() const { return sizes(); } /** \returns the vertex of the bounding box at the corner defined by * the corner-id corner. It works only for a 1D, 2D or 3D bounding box. * For 1D bounding boxes corners are named by 2 enum constants: * BottomLeft and BottomRight. * For 2D bounding boxes, corners are named by 4 enum constants: * BottomLeft, BottomRight, TopLeft, TopRight. * For 3D bounding boxes, the following names are added: * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil. */ inline VectorType corner(CornerType corner) const { EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE); VectorType res; Index mult = 1; for(Index d=0; d(Scalar(0), Scalar(1)); } else r[d] = internal::random(m_min[d], m_max[d]); } return r; } /** \returns true if the point \a p is inside the box \c *this. */ template inline bool contains(const MatrixBase& a_p) const { typename internal::nested::type p(a_p.derived()); return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all(); } /** \returns true if the box \a b is entirely inside the box \c *this. */ inline bool contains(const AlignedBox& b) const { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); } /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */ template inline AlignedBox& extend(const MatrixBase& a_p) { typename internal::nested::type p(a_p.derived()); m_min = m_min.cwiseMin(p); m_max = m_max.cwiseMax(p); return *this; } /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */ inline AlignedBox& extend(const AlignedBox& b) { m_min = m_min.cwiseMin(b.m_min); m_max = m_max.cwiseMax(b.m_max); return *this; } /** Clamps \c *this by the box \a b and returns a reference to \c *this. 
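 *
 * \note clamp() shrinks \c *this in place, while intersection() below
 * returns the result as a new box.  Sketch (added):
 * \code
 * Eigen::AlignedBox2f a(Eigen::Vector2f(0, 0), Eigen::Vector2f(4, 4));
 * Eigen::AlignedBox2f b(Eigen::Vector2f(2, 2), Eigen::Vector2f(6, 6));
 * a.clamp(b);   // a now spans [2,4] x [2,4], i.e. a.intersection(b)
 * \endcode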
*/ inline AlignedBox& clamp(const AlignedBox& b) { m_min = m_min.cwiseMax(b.m_min); m_max = m_max.cwiseMin(b.m_max); return *this; } /** Returns an AlignedBox that is the intersection of \a b and \c *this */ inline AlignedBox intersection(const AlignedBox& b) const {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); } /** Returns an AlignedBox that is the union of \a b and \c *this */ inline AlignedBox merged(const AlignedBox& b) const { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); } /** Translate \c *this by the vector \a t and returns a reference to \c *this. */ template inline AlignedBox& translate(const MatrixBase& a_t) { const typename internal::nested::type t(a_t.derived()); m_min += t; m_max += t; return *this; } /** \returns the squared distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. * \sa exteriorDistance() */ template inline Scalar squaredExteriorDistance(const MatrixBase& a_p) const; /** \returns the squared distance between the boxes \a b and \c *this, * and zero if the boxes intersect. * \sa exteriorDistance() */ inline Scalar squaredExteriorDistance(const AlignedBox& b) const; /** \returns the distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. * \sa squaredExteriorDistance() */ template inline NonInteger exteriorDistance(const MatrixBase& p) const { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(p))); } /** \returns the distance between the boxes \a b and \c *this, * and zero if the boxes intersect. * \sa squaredExteriorDistance() */ inline NonInteger exteriorDistance(const AlignedBox& b) const { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit AlignedBox(const AlignedBox& other) { m_min = (other.min)().template cast(); m_max = (other.max)().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); } protected: VectorType m_min, m_max; }; template template inline Scalar AlignedBox::squaredExteriorDistance(const MatrixBase& a_p) const { typename internal::nested::type p(a_p.derived()); Scalar dist2(0); Scalar aux; for (Index k=0; k p[k] ) { aux = m_min[k] - p[k]; dist2 += aux*aux; } else if( p[k] > m_max[k] ) { aux = p[k] - m_max[k]; dist2 += aux*aux; } } return dist2; } template inline Scalar AlignedBox::squaredExteriorDistance(const AlignedBox& b) const { Scalar dist2(0); Scalar aux; for (Index k=0; k b.m_max[k] ) { aux = m_min[k] - b.m_max[k]; dist2 += aux*aux; } else if( b.m_min[k] > m_max[k] ) { aux = b.m_min[k] - m_max[k]; dist2 += aux*aux; } } return dist2; } /** \defgroup alignedboxtypedefs Global aligned box typedefs * * \ingroup Geometry_Module * * Eigen defines several typedef shortcuts for most common aligned box types. 
* * The general patterns are the following: * * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double. * * For example, \c AlignedBox3d is a fixed-size 3x3 aligned box type of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats. * * \sa class AlignedBox */ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup alignedboxtypedefs */ \ typedef AlignedBox AlignedBox##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS } // end namespace Eigen #endif // EIGEN_ALIGNEDBOX_H antimony/lib/fab/vendor/Eigen/src/Geometry/Homogeneous.h0000644000175000017500000002660713341130426022561 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOMOGENEOUS_H #define EIGEN_HOMOGENEOUS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Homogeneous * * \brief Expression of one (or a set of) homogeneous vector(s) * * \param MatrixType the type of the object in which we are making homogeneous * * This class represents an expression of one (or a set of) homogeneous vector(s). * It is the return type of MatrixBase::homogeneous() and most of the time * this is the only way it is used. * * \sa MatrixBase::homogeneous() */ namespace internal { template struct traits > : traits { typedef typename traits::StorageKind StorageKind; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; enum { RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ? int(MatrixType::RowsAtCompileTime) + 1 : Dynamic, ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ? int(MatrixType::ColsAtCompileTime) + 1 : Dynamic, RowsAtCompileTime = Direction==Vertical ? RowsPlusOne : MatrixType::RowsAtCompileTime, ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, TmpFlags = _MatrixTypeNested::Flags & HereditaryBits, Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit) : RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit) : TmpFlags, CoeffReadCost = _MatrixTypeNested::CoeffReadCost }; }; template struct homogeneous_left_product_impl; template struct homogeneous_right_product_impl; } // end namespace internal template class Homogeneous : internal::no_assignment_operator, public MatrixBase > { public: enum { Direction = _Direction }; typedef MatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous) inline Homogeneous(const MatrixType& matrix) : m_matrix(matrix) {} inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 
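// (added note) a Vertical Homogeneous expression carries one extra row
// holding the appended ones (see coeff() below), hence the +1 here: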
1 : 0); } inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); } inline Scalar coeff(Index row, Index col) const { if( (int(Direction)==Vertical && row==m_matrix.rows()) || (int(Direction)==Horizontal && col==m_matrix.cols())) return 1; return m_matrix.coeff(row, col); } template inline const internal::homogeneous_right_product_impl operator* (const MatrixBase& rhs) const { eigen_assert(int(Direction)==Horizontal); return internal::homogeneous_right_product_impl(m_matrix,rhs.derived()); } template friend inline const internal::homogeneous_left_product_impl operator* (const MatrixBase& lhs, const Homogeneous& rhs) { eigen_assert(int(Direction)==Vertical); return internal::homogeneous_left_product_impl(lhs.derived(),rhs.m_matrix); } template friend inline const internal::homogeneous_left_product_impl > operator* (const Transform& lhs, const Homogeneous& rhs) { eigen_assert(int(Direction)==Vertical); return internal::homogeneous_left_product_impl >(lhs,rhs.m_matrix); } protected: typename MatrixType::Nested m_matrix; }; /** \geometry_module * * \return an expression of the equivalent homogeneous vector * * \only_for_vectors * * Example: \include MatrixBase_homogeneous.cpp * Output: \verbinclude MatrixBase_homogeneous.out * * \sa class Homogeneous */ template inline typename MatrixBase::HomogeneousReturnType MatrixBase::homogeneous() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return derived(); } /** \geometry_module * * \returns a matrix expression of homogeneous column (or row) vectors * * Example: \include VectorwiseOp_homogeneous.cpp * Output: \verbinclude VectorwiseOp_homogeneous.out * * \sa MatrixBase::homogeneous() */ template inline Homogeneous VectorwiseOp::homogeneous() const { return _expression(); } /** \geometry_module * * \returns an expression of the homogeneous normalized vector of \c *this * * Example: \include MatrixBase_hnormalized.cpp * Output: \verbinclude MatrixBase_hnormalized.out * * \sa VectorwiseOp::hnormalized() */ template inline const typename MatrixBase::HNormalizedReturnType MatrixBase::hnormalized() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return ConstStartMinusOne(derived(),0,0, ColsAtCompileTime==1?size()-1:1, ColsAtCompileTime==1?1:size()-1) / coeff(size()-1); } /** \geometry_module * * \returns an expression of the homogeneous normalized vector of \c *this * * Example: \include DirectionWise_hnormalized.cpp * Output: \verbinclude DirectionWise_hnormalized.out * * \sa MatrixBase::hnormalized() */ template inline const typename VectorwiseOp::HNormalizedReturnType VectorwiseOp::hnormalized() const { return HNormalized_Block(_expression(),0,0, Direction==Vertical ? _expression().rows()-1 : _expression().rows(), Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient( Replicate (HNormalized_Factors(_expression(), Direction==Vertical ? _expression().rows()-1:0, Direction==Horizontal ? _expression().cols()-1:0, Direction==Vertical ? 1 : _expression().rows(), Direction==Horizontal ? 1 : _expression().cols()), Direction==Vertical ? _expression().rows()-1 : 1, Direction==Horizontal ? 
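// (added note, best-effort reading) the divisor -- the last row
// (Vertical) or last column (Horizontal) -- is replicated to the shape
// of the leading block so that cwiseQuotient() divides each vector by
// its own homogeneous factor: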
_expression().cols()-1 : 1)); } namespace internal { template struct take_matrix_for_product { typedef MatrixOrTransformType type; static const type& run(const type &x) { return x; } }; template struct take_matrix_for_product > { typedef Transform TransformType; typedef typename internal::add_const::type type; static type run (const TransformType& x) { return x.affine(); } }; template struct take_matrix_for_product > { typedef Transform TransformType; typedef typename TransformType::MatrixType type; static const type& run (const TransformType& x) { return x.matrix(); } }; template struct traits,Lhs> > { typedef typename take_matrix_for_product::type LhsMatrixType; typedef typename remove_all::type MatrixTypeCleaned; typedef typename remove_all::type LhsMatrixTypeCleaned; typedef typename make_proper_matrix_type< typename traits::Scalar, LhsMatrixTypeCleaned::RowsAtCompileTime, MatrixTypeCleaned::ColsAtCompileTime, MatrixTypeCleaned::PlainObject::Options, LhsMatrixTypeCleaned::MaxRowsAtCompileTime, MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType; }; template struct homogeneous_left_product_impl,Lhs> : public ReturnByValue,Lhs> > { typedef typename traits::LhsMatrixType LhsMatrixType; typedef typename remove_all::type LhsMatrixTypeCleaned; typedef typename remove_all::type LhsMatrixTypeNested; typedef typename MatrixType::Index Index; homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) : m_lhs(take_matrix_for_product::run(lhs)), m_rhs(rhs) {} inline Index rows() const { return m_lhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible dst = Block (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs; dst += m_lhs.col(m_lhs.cols()-1).rowwise() .template replicate(m_rhs.cols()); } typename LhsMatrixTypeCleaned::Nested m_lhs; typename MatrixType::Nested m_rhs; }; template struct traits,Rhs> > { typedef typename make_proper_matrix_type::Scalar, MatrixType::RowsAtCompileTime, Rhs::ColsAtCompileTime, MatrixType::PlainObject::Options, MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime>::type ReturnType; }; template struct homogeneous_right_product_impl,Rhs> : public ReturnByValue,Rhs> > { typedef typename remove_all::type RhsNested; typedef typename MatrixType::Index Index; homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {} inline Index rows() const { return m_lhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible dst = m_lhs * Block (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols()); dst += m_rhs.row(m_rhs.rows()-1).colwise() .template replicate(m_lhs.rows()); } typename MatrixType::Nested m_lhs; typename Rhs::Nested m_rhs; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_HOMOGENEOUS_H antimony/lib/fab/vendor/Eigen/src/Geometry/AngleAxis.h0000644000175000017500000001663513341130426022144 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_ANGLEAXIS_H #define EIGEN_ANGLEAXIS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class AngleAxis * * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis * * \param _Scalar the scalar type, i.e., the type of the coefficients. * * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized. * * The following two typedefs are provided for convenience: * \li \c AngleAxisf for \c float * \li \c AngleAxisd for \c double * * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily * mimic Euler-angles. Here is an example: * \include AngleAxis_mimic_euler.cpp * Output: \verbinclude AngleAxis_mimic_euler.out * * \note This class is not aimed to be used to store a rotation transformation, * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix) * and transformation objects. * * \sa class Quaternion, class Transform, MatrixBase::UnitX() */ namespace internal { template struct traits > { typedef _Scalar Scalar; }; } template class AngleAxis : public RotationBase,3> { typedef RotationBase,3> Base; public: using Base::operator*; enum { Dim = 3 }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Matrix Matrix3; typedef Matrix Vector3; typedef Quaternion QuaternionType; protected: Vector3 m_axis; Scalar m_angle; public: /** Default constructor without initialization. */ AngleAxis() {} /** Constructs and initialize the angle-axis rotation from an \a angle in radian * and an \a axis which \b must \b be \b normalized. * * \warning If the \a axis vector is not normalized, then the angle-axis object * represents an invalid rotation. */ template inline AngleAxis(const Scalar& angle, const MatrixBase& axis) : m_axis(axis), m_angle(angle) {} /** Constructs and initialize the angle-axis rotation from a quaternion \a q. */ template inline explicit AngleAxis(const QuaternionBase& q) { *this = q; } /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. */ template inline explicit AngleAxis(const MatrixBase& m) { *this = m; } Scalar angle() const { return m_angle; } Scalar& angle() { return m_angle; } const Vector3& axis() const { return m_axis; } Vector3& axis() { return m_axis; } /** Concatenates two rotations */ inline QuaternionType operator* (const AngleAxis& other) const { return QuaternionType(*this) * QuaternionType(other); } /** Concatenates two rotations */ inline QuaternionType operator* (const QuaternionType& other) const { return QuaternionType(*this) * other; } /** Concatenates two rotations */ friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b) { return a * QuaternionType(b); } /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */ AngleAxis inverse() const { return AngleAxis(-m_angle, m_axis); } template AngleAxis& operator=(const QuaternionBase& q); template AngleAxis& operator=(const MatrixBase& m); template AngleAxis& fromRotationMatrix(const MatrixBase& m); Matrix3 toRotationMatrix(void) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. 
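 *
 * Example (added sketch):
 * \code
 * Eigen::AngleAxisd ad(1.0, Eigen::Vector3d::UnitZ());  // 1 radian about Z
 * Eigen::AngleAxisf af = ad.cast<float>();              // float copy
 * \endcode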
*/ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit AngleAxis(const AngleAxis& other) { m_axis = other.axis().template cast(); m_angle = Scalar(other.angle()); } static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const AngleAxis& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module * single precision angle-axis type */ typedef AngleAxis AngleAxisf; /** \ingroup Geometry_Module * double precision angle-axis type */ typedef AngleAxis AngleAxisd; /** Set \c *this from a \b unit quaternion. * The axis is normalized. * * \warning As any other method dealing with quaternion, if the input quaternion * is not normalized then the result is undefined. */ template template AngleAxis& AngleAxis::operator=(const QuaternionBase& q) { using std::acos; using std::min; using std::max; using std::sqrt; Scalar n2 = q.vec().squaredNorm(); if (n2 < NumTraits::dummy_precision()*NumTraits::dummy_precision()) { m_angle = 0; m_axis << 1, 0, 0; } else { m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1))); m_axis = q.vec() / sqrt(n2); } return *this; } /** Set \c *this from a 3x3 rotation matrix \a mat. */ template template AngleAxis& AngleAxis::operator=(const MatrixBase& mat) { // Since a direct conversion would not be really faster, // let's use the robust Quaternion implementation: return *this = QuaternionType(mat); } /** * \brief Sets \c *this from a 3x3 rotation matrix. **/ template template AngleAxis& AngleAxis::fromRotationMatrix(const MatrixBase& mat) { return *this = QuaternionType(mat); } /** Constructs and \returns an equivalent 3x3 rotation matrix. */ template typename AngleAxis::Matrix3 AngleAxis::toRotationMatrix(void) const { using std::sin; using std::cos; Matrix3 res; Vector3 sin_axis = sin(m_angle) * m_axis; Scalar c = cos(m_angle); Vector3 cos1_axis = (Scalar(1)-c) * m_axis; Scalar tmp; tmp = cos1_axis.x() * m_axis.y(); res.coeffRef(0,1) = tmp - sin_axis.z(); res.coeffRef(1,0) = tmp + sin_axis.z(); tmp = cos1_axis.x() * m_axis.z(); res.coeffRef(0,2) = tmp + sin_axis.y(); res.coeffRef(2,0) = tmp - sin_axis.y(); tmp = cos1_axis.y() * m_axis.z(); res.coeffRef(1,2) = tmp - sin_axis.x(); res.coeffRef(2,1) = tmp + sin_axis.x(); res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c; return res; } } // end namespace Eigen #endif // EIGEN_ANGLEAXIS_H antimony/lib/fab/vendor/Eigen/src/Geometry/Transform.h0000644000175000017500000015564413341130426022250 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2010 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_TRANSFORM_H #define EIGEN_TRANSFORM_H namespace Eigen { namespace internal { template struct transform_traits { enum { Dim = Transform::Dim, HDim = Transform::HDim, Mode = Transform::Mode, IsProjective = (int(Mode)==int(Projective)) }; }; template< typename TransformType, typename MatrixType, int Case = transform_traits::IsProjective ? 0 : int(MatrixType::RowsAtCompileTime) == int(transform_traits::HDim) ? 1 : 2> struct transform_right_product_impl; template< typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct transform_left_product_impl; template< typename Lhs, typename Rhs, bool AnyProjective = transform_traits::IsProjective || transform_traits::IsProjective> struct transform_transform_product_impl; template< typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct transform_construct_from_matrix; template struct transform_take_affine_part; template struct transform_make_affine; } // end namespace internal /** \geometry_module \ingroup Geometry_Module * * \class Transform * * \brief Represents an homogeneous transformation in a N dimensional space * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _Dim the dimension of the space * \tparam _Mode the type of the transformation. Can be: * - #Affine: the transformation is stored as a (Dim+1)^2 matrix, * where the last row is assumed to be [0 ... 0 1]. * - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix. * - #Projective: the transformation is stored as a (Dim+1)^2 matrix * without any assumption. * \tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor. * These Options are passed directly to the underlying matrix type. * * The homography is internally represented and stored by a matrix which * is available through the matrix() method. To understand the behavior of * this class you have to think a Transform object as its internal * matrix representation. The chosen convention is right multiply: * * \code v' = T * v \endcode * * Therefore, an affine transformation matrix M is shaped like this: * * \f$ \left( \begin{array}{cc} * linear & translation\\ * 0 ... 0 & 1 * \end{array} \right) \f$ * * Note that for a projective transformation the last row can be anything, * and then the interpretation of different parts might be sightly different. * * However, unlike a plain matrix, the Transform class provides many features * simplifying both its assembly and usage. In particular, it can be composed * with any other transformations (Transform,Translation,RotationBase,Matrix) * and can be directly used to transform implicit homogeneous vectors. All these * operations are handled via the operator*. For the composition of transformations, * its principle consists to first convert the right/left hand sides of the product * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product. * Of course, internally, operator* tries to perform the minimal number of operations * according to the nature of each terms. Likewise, when applying the transform * to non homogeneous vectors, the latters are automatically promoted to homogeneous * one before doing the matrix product. 
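 *
 * For instance (added sketch, using the \c Affine3f typedef defined
 * further down in this file):
 * \code
 * Eigen::Affine3f T = Eigen::Translation3f(1, 0, 0)
 *                   * Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ());
 * Eigen::Vector3f v(1, 2, 3);
 * Eigen::Vector3f w = T * v;  // v is implicitly promoted and de-promoted
 * \endcode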
The convertions to homogeneous representations * are performed as follow: * * \b Translation t (Dim)x(1): * \f$ \left( \begin{array}{cc} * I & t \\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Rotation R (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * R & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Linear \b Matrix L (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * L & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Affine \b Matrix A (Dim)x(Dim+1): * \f$ \left( \begin{array}{c} * A\\ * 0\,...\,0\,1 * \end{array} \right) \f$ * * \b Column \b vector v (Dim)x(1): * \f$ \left( \begin{array}{c} * v\\ * 1 * \end{array} \right) \f$ * * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n): * \f$ \left( \begin{array}{ccc} * v_1 & ... & v_n\\ * 1 & ... & 1 * \end{array} \right) \f$ * * The concatenation of a Transform object with any kind of other transformation * always returns a Transform object. * * A little exception to the "as pure matrix product" rule is the case of the * transformation of non homogeneous vectors by an affine transformation. In * that case the last matrix row can be ignored, and the product returns non * homogeneous vectors. * * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation, * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix. * The solution is either to use a Dim x Dynamic matrix or explicitly request a * vector transformation by making the vector homogeneous: * \code * m' = T * m.colwise().homogeneous(); * \endcode * Note that there is zero overhead. * * Conversion methods from/to Qt's QMatrix and QTransform are available if the * preprocessor token EIGEN_QT_SUPPORT is defined. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN. * * \sa class Matrix, class Quaternion */ template class Transform { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1)) enum { Mode = _Mode, Options = _Options, Dim = _Dim, ///< space dimension in which the transformation holds HDim = _Dim+1, ///< size of a respective homogeneous vector Rows = int(Mode)==(AffineCompact) ? 
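// (added note) AffineCompact stores only the Dim x (Dim+1) block and
// leaves the trailing [0 ... 0 1] row implicit: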
Dim : HDim }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef DenseIndex Index; /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type::type MatrixType; /** constified MatrixType */ typedef const MatrixType ConstMatrixType; /** type of the matrix used to represent the linear part of the transformation */ typedef Matrix LinearMatrixType; /** type of read/write reference to the linear part of the transformation */ typedef Block LinearPart; /** type of read reference to the linear part of the transformation */ typedef const Block ConstLinearPart; /** type of read/write reference to the affine part of the transformation */ typedef typename internal::conditional >::type AffinePart; /** type of read reference to the affine part of the transformation */ typedef typename internal::conditional >::type ConstAffinePart; /** type of a vector */ typedef Matrix VectorType; /** type of a read/write reference to the translation part of the rotation */ typedef Block TranslationPart; /** type of a read reference to the translation part of the rotation */ typedef const Block ConstTranslationPart; /** corresponding translation type */ typedef Translation TranslationType; // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0 enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) }; /** The return type of the product between a diagonal matrix and a transform */ typedef Transform TransformTimeDiagonalReturnType; protected: MatrixType m_matrix; public: /** Default constructor without initialization of the meaningful coefficients. * If Mode==Affine, then the last row is set to [0 ... 0 1] */ inline Transform() { check_template_params(); internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix); } inline Transform(const Transform& other) { check_template_params(); m_matrix = other.m_matrix; } inline explicit Transform(const TranslationType& t) { check_template_params(); *this = t; } inline explicit Transform(const UniformScaling& s) { check_template_params(); *this = s; } template inline explicit Transform(const RotationBase& r) { check_template_params(); *this = r; } inline Transform& operator=(const Transform& other) { m_matrix = other.m_matrix; return *this; } typedef internal::transform_take_affine_part take_affine_part; /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */ template inline explicit Transform(const EigenBase& other) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); check_template_params(); internal::transform_construct_from_matrix::run(this, other.derived()); } /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. 
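 *
 * For example (added sketch):
 * \code
 * Eigen::Matrix4f m = Eigen::Matrix4f::Identity();
 * Eigen::Affine3f t;
 * t = m;  // interpreted as a full (Dim+1)^2 homogeneous matrix
 * \endcode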
*/ template inline Transform& operator=(const EigenBase& other) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); internal::transform_construct_from_matrix::run(this, other.derived()); return *this; } template inline Transform(const Transform& other) { check_template_params(); // only the options change, we can directly copy the matrices m_matrix = other.matrix(); } template inline Transform(const Transform& other) { check_template_params(); // prevent conversions as: // Affine | AffineCompact | Isometry = Projective EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)), YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) // prevent conversions as: // Isometry = Affine | AffineCompact EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)), YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) enum { ModeIsAffineCompact = Mode == int(AffineCompact), OtherModeIsAffineCompact = OtherMode == int(AffineCompact) }; if(ModeIsAffineCompact == OtherModeIsAffineCompact) { // We need the block expression because the code is compiled for all // combinations of transformations and will trigger a compile time error // if one tries to assign the matrices directly m_matrix.template block(0,0) = other.matrix().template block(0,0); makeAffine(); } else if(OtherModeIsAffineCompact) { typedef typename Transform::MatrixType OtherMatrixType; internal::transform_construct_from_matrix::run(this, other.matrix()); } else { // here we know that Mode == AffineCompact and OtherMode != AffineCompact. // if OtherMode were Projective, the static assert above would already have caught it. 
// So the only possibility is that OtherMode == Affine linear() = other.linear(); translation() = other.translation(); } } template Transform(const ReturnByValue& other) { check_template_params(); other.evalTo(*this); } template Transform& operator=(const ReturnByValue& other) { other.evalTo(*this); return *this; } #ifdef EIGEN_QT_SUPPORT inline Transform(const QMatrix& other); inline Transform& operator=(const QMatrix& other); inline QMatrix toQMatrix(void) const; inline Transform(const QTransform& other); inline Transform& operator=(const QTransform& other); inline QTransform toQTransform(void) const; #endif /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) const */ inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); } /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) */ inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); } /** \returns a read-only expression of the transformation matrix */ inline const MatrixType& matrix() const { return m_matrix; } /** \returns a writable expression of the transformation matrix */ inline MatrixType& matrix() { return m_matrix; } /** \returns a read-only expression of the linear part of the transformation */ inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); } /** \returns a writable expression of the linear part of the transformation */ inline LinearPart linear() { return LinearPart(m_matrix,0,0); } /** \returns a read-only expression of the Dim x HDim affine part of the transformation */ inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); } /** \returns a writable expression of the Dim x HDim affine part of the transformation */ inline AffinePart affine() { return take_affine_part::run(m_matrix); } /** \returns a read-only expression of the translation vector of the transformation */ inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); } /** \returns a writable expression of the translation vector of the transformation */ inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); } /** \returns an expression of the product between the transform \c *this and a matrix expression \a other * * The right hand side \a other might be either: * \li a vector of size Dim, * \li an homogeneous vector of size Dim+1, * \li a set of vectors of size Dim x Dynamic, * \li a set of homogeneous vectors of size Dim+1 x Dynamic, * \li a linear transformation matrix of size Dim x Dim, * \li an affine transformation matrix of size Dim x Dim+1, * \li a transformation matrix of size Dim+1 x Dim+1. */ // note: this function is defined here because some compilers cannot find the respective declaration template EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl::ResultType operator * (const EigenBase &other) const { return internal::transform_right_product_impl::run(*this,other.derived()); } /** \returns the product expression of a transformation matrix \a a times a transform \a b * * The left hand side \a other might be either: * \li a linear transformation matrix of size Dim x Dim, * \li an affine transformation matrix of size Dim x Dim+1, * \li a general transformation matrix of size Dim+1 x Dim+1. 
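 *
 * Example (added sketch, composing with a plain linear matrix):
 * \code
 * Eigen::Affine3f t = Eigen::Affine3f::Identity();
 * Eigen::Matrix3f r = Eigen::Matrix3f::Identity();  // Dim x Dim lhs
 * Eigen::Affine3f u = r * t;  // linear = r * t.linear(), translation = r * t.translation()
 * \endcode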
*/ template friend inline const typename internal::transform_left_product_impl::ResultType operator * (const EigenBase &a, const Transform &b) { return internal::transform_left_product_impl::run(a.derived(),b); } /** \returns The product expression of a transform \a a times a diagonal matrix \a b * * The rhs diagonal matrix is interpreted as an affine scaling transformation. The * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. */ template inline const TransformTimeDiagonalReturnType operator * (const DiagonalBase &b) const { TransformTimeDiagonalReturnType res(*this); res.linear() *= b; return res; } /** \returns The product expression of a diagonal matrix \a a times a transform \a b * * The lhs diagonal matrix is interpreted as an affine scaling transformation. The * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. */ template friend inline TransformTimeDiagonalReturnType operator * (const DiagonalBase &a, const Transform &b) { TransformTimeDiagonalReturnType res; res.linear().noalias() = a*b.linear(); res.translation().noalias() = a*b.translation(); if (Mode!=int(AffineCompact)) res.matrix().row(Dim) = b.matrix().row(Dim); return res; } template inline Transform& operator*=(const EigenBase& other) { return *this = *this * other; } /** Concatenates two transformations */ inline const Transform operator * (const Transform& other) const { return internal::transform_transform_product_impl::run(*this,other); } #ifdef __INTEL_COMPILER private: // this intermediate structure permits to workaround a bug in ICC 11: // error: template instantiation resulted in unexpected function type of "Eigen::Transform // (const Eigen::Transform &) const" // (the meaning of a name may have changed since the template declaration -- the type of the template is: // "Eigen::internal::transform_transform_product_impl, // Eigen::Transform, >::ResultType (const Eigen::Transform &) const") // template struct icc_11_workaround { typedef internal::transform_transform_product_impl > ProductType; typedef typename ProductType::ResultType ResultType; }; public: /** Concatenates two different transformations */ template inline typename icc_11_workaround::ResultType operator * (const Transform& other) const { typedef typename icc_11_workaround::ProductType ProductType; return ProductType::run(*this,other); } #else /** Concatenates two different transformations */ template inline typename internal::transform_transform_product_impl >::ResultType operator * (const Transform& other) const { return internal::transform_transform_product_impl >::run(*this,other); } #endif /** \sa MatrixBase::setIdentity() */ void setIdentity() { m_matrix.setIdentity(); } /** * \brief Returns an identity transformation. * \todo In the future this function should be returning a Transform expression. 
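 *
 * Usage (added sketch):
 * \code
 * Eigen::Affine3f t = Eigen::Affine3f::Identity();
 * t.translate(Eigen::Vector3f(1, 0, 0));  // build onto the identity
 * \endcode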
*/ static const Transform Identity() { return Transform(MatrixType::Identity()); } template inline Transform& scale(const MatrixBase &other); template inline Transform& prescale(const MatrixBase &other); inline Transform& scale(const Scalar& s); inline Transform& prescale(const Scalar& s); template inline Transform& translate(const MatrixBase &other); template inline Transform& pretranslate(const MatrixBase &other); template inline Transform& rotate(const RotationType& rotation); template inline Transform& prerotate(const RotationType& rotation); Transform& shear(const Scalar& sx, const Scalar& sy); Transform& preshear(const Scalar& sx, const Scalar& sy); inline Transform& operator=(const TranslationType& t); inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); } inline Transform operator*(const TranslationType& t) const; inline Transform& operator=(const UniformScaling& t); inline Transform& operator*=(const UniformScaling& s) { return scale(s.factor()); } inline Transform operator*(const UniformScaling& s) const { Transform res = *this; res.scale(s.factor()); return res; } inline Transform& operator*=(const DiagonalMatrix& s) { linear() *= s; return *this; } template inline Transform& operator=(const RotationBase& r); template inline Transform& operator*=(const RotationBase& r) { return rotate(r.toRotationMatrix()); } template inline Transform operator*(const RotationBase& r) const; const LinearMatrixType rotation() const; template void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; template void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const; template Transform& fromPositionOrientationScale(const MatrixBase &position, const OrientationType& orientation, const MatrixBase &scale); inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const; /** \returns a const pointer to the column major internal matrix */ const Scalar* data() const { return m_matrix.data(); } /** \returns a non-const pointer to the column major internal matrix */ Scalar* data() { return m_matrix.data(); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Transform(const Transform& other) { check_template_params(); m_matrix = other.matrix().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const Transform& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_matrix.isApprox(other.m_matrix, prec); } /** Sets the last row to [0 ... 0 1] */ void makeAffine() { internal::transform_make_affine::run(m_matrix); } /** \internal * \returns the Dim x Dim linear part if the transformation is affine, * and the HDim x Dim part for projective transformations. */ inline Block linearExt() { return m_matrix.template block(0,0); } /** \internal * \returns the Dim x Dim linear part if the transformation is affine, * and the HDim x Dim part for projective transformations. 
*/ inline const Block linearExt() const { return m_matrix.template block(0,0); } /** \internal * \returns the translation part if the transformation is affine, * and the last column for projective transformations. */ inline Block translationExt() { return m_matrix.template block(0,Dim); } /** \internal * \returns the translation part if the transformation is affine, * and the last column for projective transformations. */ inline const Block translationExt() const { return m_matrix.template block(0,Dim); } #ifdef EIGEN_TRANSFORM_PLUGIN #include EIGEN_TRANSFORM_PLUGIN #endif protected: #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void check_template_params() { EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif }; /** \ingroup Geometry_Module */ typedef Transform Isometry2f; /** \ingroup Geometry_Module */ typedef Transform Isometry3f; /** \ingroup Geometry_Module */ typedef Transform Isometry2d; /** \ingroup Geometry_Module */ typedef Transform Isometry3d; /** \ingroup Geometry_Module */ typedef Transform Affine2f; /** \ingroup Geometry_Module */ typedef Transform Affine3f; /** \ingroup Geometry_Module */ typedef Transform Affine2d; /** \ingroup Geometry_Module */ typedef Transform Affine3d; /** \ingroup Geometry_Module */ typedef Transform AffineCompact2f; /** \ingroup Geometry_Module */ typedef Transform AffineCompact3f; /** \ingroup Geometry_Module */ typedef Transform AffineCompact2d; /** \ingroup Geometry_Module */ typedef Transform AffineCompact3d; /** \ingroup Geometry_Module */ typedef Transform Projective2f; /** \ingroup Geometry_Module */ typedef Transform Projective3f; /** \ingroup Geometry_Module */ typedef Transform Projective2d; /** \ingroup Geometry_Module */ typedef Transform Projective3d; /************************** *** Optional QT support *** **************************/ #ifdef EIGEN_QT_SUPPORT /** Initializes \c *this from a QMatrix assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template Transform::Transform(const QMatrix& other) { check_template_params(); *this = other; } /** Set \c *this from a QMatrix assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template Transform& Transform::operator=(const QMatrix& other) { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), 0, 0, 1; return *this; } /** \returns a QMatrix from \c *this assuming the dimension is 2. * * \warning this conversion might loss data if \c *this is not affine * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template QMatrix Transform::toQMatrix(void) const { check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2)); } /** Initializes \c *this from a QTransform assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template Transform::Transform(const QTransform& other) { check_template_params(); *this = other; } /** Set \c *this from a QTransform assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. 
*/ template Transform& Transform::operator=(const QTransform& other) { check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(); else m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), other.m13(), other.m23(), other.m33(); return *this; } /** \returns a QTransform from \c *this assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template QTransform Transform::toQTransform(void) const { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2)); else return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2)); } #endif /********************* *** Procedural API *** *********************/ /** Applies on the right the non uniform scale transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \sa prescale() */ template template Transform& Transform::scale(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt().noalias() = (linearExt() * other.asDiagonal()); return *this; } /** Applies on the right a uniform scale of a factor \a c to \c *this * and returns a reference to \c *this. * \sa prescale(Scalar) */ template inline Transform& Transform::scale(const Scalar& s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt() *= s; return *this; } /** Applies on the left the non uniform scale transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \sa scale() */ template template Transform& Transform::prescale(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template block(0,0).noalias() = (other.asDiagonal() * m_matrix.template block(0,0)); return *this; } /** Applies on the left a uniform scale of a factor \a c to \c *this * and returns a reference to \c *this. * \sa scale(Scalar) */ template inline Transform& Transform::prescale(const Scalar& s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template topRows() *= s; return *this; } /** Applies on the right the translation matrix represented by the vector \a other * to \c *this and returns a reference to \c *this. * \sa pretranslate() */ template template Transform& Transform::translate(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) translationExt() += linearExt() * other; return *this; } /** Applies on the left the translation matrix represented by the vector \a other * to \c *this and returns a reference to \c *this. 
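 *
 * In other words (added sketch), for an affine transform \c t and a
 * vector \c v:
 * \code
 * t.pretranslate(v);  // t := Translation(v) * t  -- applied after t
 * t.translate(v);     // t := t * Translation(v)  -- applied before t
 * \endcode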
* \sa translate() */ template template Transform& Transform::pretranslate(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) if(int(Mode)==int(Projective)) affine() += other * m_matrix.row(Dim); else translation() += other; return *this; } /** Applies on the right the rotation represented by the rotation \a rotation * to \c *this and returns a reference to \c *this. * * The template parameter \a RotationType is the type of the rotation which * must be known by internal::toRotationMatrix<>. * * Natively supported types includes: * - any scalar (2D), * - a Dim x Dim matrix expression, * - a Quaternion (3D), * - a AngleAxis (3D) * * This mechanism is easily extendable to support user types such as Euler angles, * or a pair of Quaternion for 4D rotations. * * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) */ template template Transform& Transform::rotate(const RotationType& rotation) { linearExt() *= internal::toRotationMatrix(rotation); return *this; } /** Applies on the left the rotation represented by the rotation \a rotation * to \c *this and returns a reference to \c *this. * * See rotate() for further details. * * \sa rotate() */ template template Transform& Transform::prerotate(const RotationType& rotation) { m_matrix.template block(0,0) = internal::toRotationMatrix(rotation) * m_matrix.template block(0,0); return *this; } /** Applies on the right the shear transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \warning 2D only. * \sa preshear() */ template Transform& Transform::shear(const Scalar& sx, const Scalar& sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) VectorType tmp = linear().col(0)*sy + linear().col(1); linear() << linear().col(0) + linear().col(1)*sx, tmp; return *this; } /** Applies on the left the shear transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \warning 2D only. 
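 *
 * Sketch (added):
 * \code
 * Eigen::Affine2f t = Eigen::Affine2f::Identity();
 * t.preshear(0.5f, 0.f);  // x' = x + 0.5*y, applied on the left
 * \endcode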
* \sa shear() */ template Transform& Transform::preshear(const Scalar& sx, const Scalar& sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template block(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block(0,0); return *this; } /****************************************************** *** Scaling, Translation and Rotation compatibility *** ******************************************************/ template inline Transform& Transform::operator=(const TranslationType& t) { linear().setIdentity(); translation() = t.vector(); makeAffine(); return *this; } template inline Transform Transform::operator*(const TranslationType& t) const { Transform res = *this; res.translate(t.vector()); return res; } template inline Transform& Transform::operator=(const UniformScaling& s) { m_matrix.setZero(); linear().diagonal().fill(s.factor()); makeAffine(); return *this; } template template inline Transform& Transform::operator=(const RotationBase& r) { linear() = internal::toRotationMatrix(r); translation().setZero(); makeAffine(); return *this; } template template inline Transform Transform::operator*(const RotationBase& r) const { Transform res = *this; res.rotate(r.derived()); return res; } /************************ *** Special functions *** ************************/ /** \returns the rotation part of the transformation * * * \svd_module * * \sa computeRotationScaling(), computeScalingRotation(), class SVD */ template const typename Transform::LinearMatrixType Transform::rotation() const { LinearMatrixType result; computeRotationScaling(&result, (LinearMatrixType*)0); return result; } /** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. * * * * \svd_module * * \sa computeScalingRotation(), rotation(), class SVD */ template template void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const { JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 VectorType sv(svd.singularValues()); sv.coeffRef(0) *= x; if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint()); if(rotation) { LinearMatrixType m(svd.matrixU()); m.col(0) /= x; rotation->lazyAssign(m * svd.matrixV().adjoint()); } } /** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. * * * * \svd_module * * \sa computeRotationScaling(), rotation(), class SVD */ template template void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const { JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 VectorType sv(svd.singularValues()); sv.coeffRef(0) *= x; if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint()); if(rotation) { LinearMatrixType m(svd.matrixU()); m.col(0) /= x; rotation->lazyAssign(m * svd.matrixV().adjoint()); } } /** Convenient method to set \c *this from a position, orientation and scale * of a 3D object. 
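 *
 * Typical use (added sketch):
 * \code
 * Eigen::Affine3f t;
 * t.fromPositionOrientationScale(Eigen::Vector3f(1, 2, 3),
 *                                Eigen::Quaternionf::Identity(),
 *                                Eigen::Vector3f(2, 2, 2));
 * \endcode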
*/ template template Transform& Transform::fromPositionOrientationScale(const MatrixBase &position, const OrientationType& orientation, const MatrixBase &scale) { linear() = internal::toRotationMatrix(orientation); linear() *= scale.asDiagonal(); translation() = position; makeAffine(); return *this; } namespace internal { template struct transform_make_affine { template static void run(MatrixType &mat) { static const int Dim = MatrixType::ColsAtCompileTime-1; mat.template block<1,Dim>(Dim,0).setZero(); mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1); } }; template<> struct transform_make_affine { template static void run(MatrixType &) { } }; // selector needed to avoid taking the inverse of a 3x4 matrix template struct projective_transform_inverse { static inline void run(const TransformType&, TransformType&) {} }; template struct projective_transform_inverse { static inline void run(const TransformType& m, TransformType& res) { res.matrix() = m.matrix().inverse(); } }; } // end namespace internal /** * * \returns the inverse transformation according to some given knowledge * on \c *this. * * \param hint allows to optimize the inversion process when the transformation * is known to be not a general transformation (optional). The possible values are: * - #Projective if the transformation is not necessarily affine, i.e., if the * last row is not guaranteed to be [0 ... 0 1] * - #Affine if the last row can be assumed to be [0 ... 0 1] * - #Isometry if the transformation is only a concatenations of translations * and rotations. * The default is the template class parameter \c Mode. * * \warning unless \a traits is always set to NoShear or NoScaling, this function * requires the generic inverse method of MatrixBase defined in the LU module. If * you forget to include this module, then you will get hard to debug linking errors. 
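 *
 * For example (added sketch), a rigid transform can be inverted cheaply
 * with the #Isometry hint, which only transposes the linear part:
 * \code
 * Eigen::Affine3f t = Eigen::Translation3f(1, 0, 0)
 *                   * Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ());
 * Eigen::Affine3f ti = t.inverse(Eigen::Isometry);
 * \endcode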
* * \sa MatrixBase::inverse() */ template Transform Transform::inverse(TransformTraits hint) const { Transform res; if (hint == Projective) { internal::projective_transform_inverse::run(*this, res); } else { if (hint == Isometry) { res.matrix().template topLeftCorner() = linear().transpose(); } else if(hint&Affine) { res.matrix().template topLeftCorner() = linear().inverse(); } else { eigen_assert(false && "Invalid transform traits in Transform::Inverse"); } // translation and remaining parts res.matrix().template topRightCorner() = - res.matrix().template topLeftCorner() * translation(); res.makeAffine(); // we do need this, because in the beginning res is uninitialized } return res; } namespace internal { /***************************************************** *** Specializations of take affine part *** *****************************************************/ template struct transform_take_affine_part { typedef typename TransformType::MatrixType MatrixType; typedef typename TransformType::AffinePart AffinePart; typedef typename TransformType::ConstAffinePart ConstAffinePart; static inline AffinePart run(MatrixType& m) { return m.template block(0,0); } static inline ConstAffinePart run(const MatrixType& m) { return m.template block(0,0); } }; template struct transform_take_affine_part > { typedef typename Transform::MatrixType MatrixType; static inline MatrixType& run(MatrixType& m) { return m; } static inline const MatrixType& run(const MatrixType& m) { return m; } }; /***************************************************** *** Specializations of construct from matrix *** *****************************************************/ template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->linear() = other; transform->translation().setZero(); transform->makeAffine(); } }; template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->affine() = other; transform->makeAffine(); } }; template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->matrix() = other; } }; template struct transform_construct_from_matrix { static inline void run(Transform *transform, const Other& other) { transform->matrix() = other.template block(0,0); } }; /********************************************************** *** Specializations of operator* with rhs EigenBase *** **********************************************************/ template struct transform_product_result { enum { Mode = (LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective : (LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine : (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact : (LhsMode == (int)Isometry || RhsMode == (int)Isometry ) ? 
Isometry : Projective }; }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 0 > { typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { return T.matrix() * other; } }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 1 > { enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, OtherCols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block TopLeftLhs; ResultType res(other.rows(),other.cols()); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other; res.row(OtherRows-1) = other.row(OtherRows-1); return res; } }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 2 > { enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, OtherCols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block TopLeftLhs; ResultType res(Replicate(T.translation(),1,other.cols())); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other; return res; } }; /********************************************************** *** Specializations of operator* with lhs EigenBase *** **********************************************************/ // generic HDim x HDim matrix * T => Projective template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef Transform ResultType; static ResultType run(const Other& other,const TransformType& tr) { return ResultType(other * tr.matrix()); } }; // generic HDim x HDim matrix * AffineCompact => Projective template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef Transform ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.matrix().noalias() = other.template block(0,0) * tr.matrix(); res.matrix().col(Dim) += other.col(Dim); return res; } }; // affine matrix * T template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.affine().noalias() = other * tr.matrix(); res.matrix().row(Dim) = tr.matrix().row(Dim); return res; } }; // affine matrix * AffineCompact template struct transform_left_product_impl { typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.matrix().noalias() = other.template block(0,0) * tr.matrix(); res.translation() += other.col(Dim); return res; } }; // linear matrix * T template struct transform_left_product_impl { typedef Transform TransformType; typedef typename 
TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other, const TransformType& tr) { TransformType res; if(Mode!=int(AffineCompact)) res.matrix().row(Dim) = tr.matrix().row(Dim); res.matrix().template topRows().noalias() = other * tr.matrix().template topRows(); return res; } }; /********************************************************** *** Specializations of operator* with another Transform *** **********************************************************/ template struct transform_transform_product_impl,Transform,false > { enum { ResultMode = transform_product_result::Mode }; typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; res.linear() = lhs.linear() * rhs.linear(); res.translation() = lhs.linear() * rhs.translation() + lhs.translation(); res.makeAffine(); return res; } }; template struct transform_transform_product_impl,Transform,true > { typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { return ResultType( lhs.matrix() * rhs.matrix() ); } }; template struct transform_transform_product_impl,Transform,true > { typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; res.matrix().template topRows() = lhs.matrix() * rhs.matrix(); res.matrix().row(Dim) = rhs.matrix().row(Dim); return res; } }; template struct transform_transform_product_impl,Transform,true > { typedef Transform Lhs; typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res(lhs.matrix().template leftCols() * rhs.matrix()); res.matrix().col(Dim) += lhs.matrix().col(Dim); return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRANSFORM_H antimony/lib/fab/vendor/Eigen/src/Geometry/EulerAngles.h0000644000175000017500000000665113341130426022474 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERANGLES_H #define EIGEN_EULERANGLES_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2) * * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}. * For instance, in: * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode * "2" represents the z axis and "0" the x axis, etc. The returned angles are such that * we have the following equality: * \code * mat == AngleAxisf(ea[0], Vector3f::UnitZ()) * * AngleAxisf(ea[1], Vector3f::UnitX()) * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode * This corresponds to the right-multiply conventions (with right hand side frames). * * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi]. 
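 *
 * A round-trip sketch (added for illustration, assuming \c mat is a proper
 * rotation matrix):
 * \code
 * Eigen::Vector3f ea = mat.eulerAngles(2, 0, 2);
 * Eigen::Matrix3f rebuilt = (Eigen::AngleAxisf(ea[0], Eigen::Vector3f::UnitZ())
 *                          * Eigen::AngleAxisf(ea[1], Eigen::Vector3f::UnitX())
 *                          * Eigen::AngleAxisf(ea[2], Eigen::Vector3f::UnitZ()))
 *                          .toRotationMatrix();
 * // rebuilt and mat should agree up to rounding error
 * \endcode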
* * \sa class AngleAxis */ template inline Matrix::Scalar,3,1> MatrixBase::eulerAngles(Index a0, Index a1, Index a2) const { using std::atan2; using std::sin; using std::cos; /* Implemented from Graphics Gems IV */ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) Matrix res; typedef Matrix Vector2; const Index odd = ((a0+1)%3 == a1) ? 0 : 1; const Index i = a0; const Index j = (a0 + 1 + odd)%3; const Index k = (a0 + 2 - odd)%3; if (a0==a2) { res[0] = atan2(coeff(j,i), coeff(k,i)); if((odd && res[0]Scalar(0))) { res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI); Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = -atan2(s2, coeff(i,i)); } else { Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = atan2(s2, coeff(i,i)); } // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles, // we can compute their respective rotation, and apply its inverse to M. Since the result must // be a rotation around x, we have: // // c2 s1.s2 c1.s2 1 0 0 // 0 c1 -s1 * M = 0 c3 s3 // -s2 s1.c2 c1.c2 0 -s3 c3 // // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3 Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j)); } else { res[0] = atan2(coeff(j,k), coeff(k,k)); Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm(); if((odd && res[0]Scalar(0))) { res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI); res[1] = atan2(-coeff(i,k), -c2); } else res[1] = atan2(-coeff(i,k), c2); Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j)); } if (!odd) res = -res; return res; } } // end namespace Eigen #endif // EIGEN_EULERANGLES_H antimony/lib/fab/vendor/Eigen/src/Geometry/ParametrizedLine.h0000644000175000017500000001713013341130426023517 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARAMETRIZEDLINE_H #define EIGEN_PARAMETRIZEDLINE_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class ParametrizedLine * * \brief A parametrized line * * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$. * * \param _Scalar the scalar type, i.e., the type of the coefficients * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. 
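 *
 * A minimal usage sketch (illustrative values):
 * \code
 * typedef Eigen::ParametrizedLine<float,3> Line3;
 * Eigen::Vector3f p0(0.f, 0.f, 0.f), p1(1.f, 0.f, 0.f), q(0.5f, 2.f, 0.f);
 * Line3 line = Line3::Through(p0, p1);        // direction is normalized internally
 * float d = line.distance(q);                 // distance from q to the line
 * Eigen::Vector3f proj = line.projection(q);  // closest point on the line
 * \endcode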
*/ template class ParametrizedLine { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim, Options = _Options }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef DenseIndex Index; typedef Matrix VectorType; /** Default constructor without initialization */ inline ParametrizedLine() {} template ParametrizedLine(const ParametrizedLine& other) : m_origin(other.origin()), m_direction(other.direction()) {} /** Constructs a dynamic-size line with \a _dim the dimension * of the ambient space */ inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {} /** Initializes a parametrized line of direction \a direction and origin \a origin. * \warning the vector direction is assumed to be normalized. */ ParametrizedLine(const VectorType& origin, const VectorType& direction) : m_origin(origin), m_direction(direction) {} template explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); /** Constructs a parametrized line going from \a p0 to \a p1. */ static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) { return ParametrizedLine(p0, (p1-p0).normalized()); } ~ParametrizedLine() {} /** \returns the dimension in which the line holds */ inline Index dim() const { return m_direction.size(); } const VectorType& origin() const { return m_origin; } VectorType& origin() { return m_origin; } const VectorType& direction() const { return m_direction; } VectorType& direction() { return m_direction; } /** \returns the squared distance of a point \a p to its projection onto the line \c *this. * \sa distance() */ RealScalar squaredDistance(const VectorType& p) const { VectorType diff = p - origin(); return (diff - direction().dot(diff) * direction()).squaredNorm(); } /** \returns the distance of a point \a p to its projection onto the line \c *this. * \sa squaredDistance() */ RealScalar distance(const VectorType& p) const { using std::sqrt; return sqrt(squaredDistance(p)); } /** \returns the projection of a point \a p onto the line \c *this. */ VectorType projection(const VectorType& p) const { return origin() + direction().dot(p-origin()) * direction(); } VectorType pointAt(const Scalar& t) const; template Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; template Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; template VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit ParametrizedLine(const ParametrizedLine& other) { m_origin = other.origin().template cast(); m_direction = other.direction().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ bool isApprox(const ParametrizedLine& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } protected: VectorType m_origin, m_direction; }; /** Constructs a parametrized line from a 2D hyperplane * * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line */ template template inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) direction() = hyperplane.normal().unitOrthogonal(); origin() = -hyperplane.normal()*hyperplane.offset(); } /** \returns the point at \a t along this line */ template inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const { return origin() + (direction()*t); } /** \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template template inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return -(hyperplane.offset()+hyperplane.normal().dot(origin())) / hyperplane.normal().dot(direction()); } /** \deprecated use intersectionParameter() * \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template template inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return intersectionParameter(hyperplane); } /** \returns the point of the intersection between \c *this and the given hyperplane */ template template inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return pointAt(intersectionParameter(hyperplane)); } } // end namespace Eigen #endif // EIGEN_PARAMETRIZEDLINE_H antimony/lib/fab/vendor/Eigen/src/Geometry/Rotation2D.h0000644000175000017500000001216113341130426022244 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ROTATION2D_H #define EIGEN_ROTATION2D_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Rotation2D * * \brief Represents a rotation/orientation in a 2 dimensional space. * * \param _Scalar the scalar type, i.e., the type of the coefficients * * This class is equivalent to a single scalar representing a counter clock wise rotation * as a single angle in radian. It provides some additional features such as the automatic * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar * interface to Quaternion in order to facilitate the writing of generic algorithms * dealing with rotations. 
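 *
 * A minimal usage sketch (illustrative values):
 * \code
 * Eigen::Rotation2Df r1(0.3f), r2(0.4f);
 * Eigen::Rotation2Df r3 = r1 * r2;                 // angles simply add up
 * Eigen::Vector2f v = r3 * Eigen::Vector2f::UnitX();
 * Eigen::Matrix2f R = r3.toRotationMatrix();       // equivalent 2x2 matrix
 * \endcode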
* * \sa class Quaternion, class Transform */ namespace internal { template struct traits > { typedef _Scalar Scalar; }; } // end namespace internal template class Rotation2D : public RotationBase,2> { typedef RotationBase,2> Base; public: using Base::operator*; enum { Dim = 2 }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Matrix Vector2; typedef Matrix Matrix2; protected: Scalar m_angle; public: /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */ inline Rotation2D(const Scalar& a) : m_angle(a) {} /** Default constructor wihtout initialization. The represented rotation is undefined. */ Rotation2D() {} /** \returns the rotation angle */ inline Scalar angle() const { return m_angle; } /** \returns a read-write reference to the rotation angle */ inline Scalar& angle() { return m_angle; } /** \returns the inverse rotation */ inline Rotation2D inverse() const { return -m_angle; } /** Concatenates two rotations */ inline Rotation2D operator*(const Rotation2D& other) const { return m_angle + other.m_angle; } /** Concatenates two rotations */ inline Rotation2D& operator*=(const Rotation2D& other) { m_angle += other.m_angle; return *this; } /** Applies the rotation to a 2D vector */ Vector2 operator* (const Vector2& vec) const { return toRotationMatrix() * vec; } template Rotation2D& fromRotationMatrix(const MatrixBase& m); Matrix2 toRotationMatrix() const; /** \returns the spherical interpolation between \c *this and \a other using * parameter \a t. It is in fact equivalent to a linear interpolation. */ inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const { return m_angle * (1-t) + other.angle() * t; } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template inline typename internal::cast_return_type >::type cast() const { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template inline explicit Rotation2D(const Rotation2D& other) { m_angle = Scalar(other.angle()); } static inline Rotation2D Identity() { return Rotation2D(0); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ bool isApprox(const Rotation2D& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module * single precision 2D rotation type */ typedef Rotation2D Rotation2Df; /** \ingroup Geometry_Module * double precision 2D rotation type */ typedef Rotation2D Rotation2Dd; /** Set \c *this from a 2x2 rotation matrix \a mat. * In other words, this function extract the rotation angle * from the rotation matrix. */ template template Rotation2D& Rotation2D::fromRotationMatrix(const MatrixBase& mat) { using std::atan2; EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0)); return *this; } /** Constructs and \returns an equivalent 2x2 rotation matrix. 
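 *
 * Together with fromRotationMatrix() this allows round-tripping between the
 * angle and the matrix representation (a sketch with an arbitrary angle):
 * \code
 * Eigen::Rotation2Df r(0.7f);
 * Eigen::Matrix2f R = r.toRotationMatrix();
 * Eigen::Rotation2Df r2;
 * r2.fromRotationMatrix(R); // r2.angle() is 0.7 up to rounding error
 * \endcode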
*/ template typename Rotation2D::Matrix2 Rotation2D::toRotationMatrix(void) const { using std::sin; using std::cos; Scalar sinA = sin(m_angle); Scalar cosA = cos(m_angle); return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); } } // end namespace Eigen #endif // EIGEN_ROTATION2D_H antimony/lib/fab/vendor/Eigen/src/Jacobi/0000755000175000017500000000000013341130426017501 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/Jacobi/CMakeLists.txt0000644000175000017500000000022313341130426022236 0ustar tiagotiagoFILE(GLOB Eigen_Jacobi_SRCS "*.h") INSTALL(FILES ${Eigen_Jacobi_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Jacobi COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/Jacobi/Jacobi.h0000644000175000017500000003375013341130426021051 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_JACOBI_H #define EIGEN_JACOBI_H namespace Eigen { /** \ingroup Jacobi_Module * \jacobi_module * \class JacobiRotation * \brief Rotation given by a cosine-sine pair. * * This class represents a Jacobi or Givens rotation. * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by * its cosine \c c and sine \c s as follow: * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s & \overline c \end{array} \right ) \f$ * * You can apply the respective counter-clockwise rotation to a column vector \c v by * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code: * \code * v.applyOnTheLeft(J.adjoint()); * \endcode * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template class JacobiRotation { public: typedef typename NumTraits::Real RealScalar; /** Default constructor without any initialization. */ JacobiRotation() {} /** Construct a planar rotation from a cosine-sine pair (\a c, \c s). 
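 *
 * For instance, a real rotation of angle \f$ \theta \f$ can be built as
 * (an illustrative sketch):
 * \code
 * float theta = 0.1f;
 * Eigen::JacobiRotation<float> G(std::cos(theta), std::sin(theta));
 * \endcode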
*/ JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {} Scalar& c() { return m_c; } Scalar c() const { return m_c; } Scalar& s() { return m_s; } Scalar s() const { return m_s; } /** Concatenates two planar rotation */ JacobiRotation operator*(const JacobiRotation& other) { using numext::conj; return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s, conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c))); } /** Returns the transposed transformation */ JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); } /** Returns the adjoint transformation */ JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); } template bool makeJacobi(const MatrixBase&, typename Derived::Index p, typename Derived::Index q); bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z); void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0); protected: void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type); void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type); Scalar m_c, m_s; }; /** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$ * * \sa MatrixBase::makeJacobi(const MatrixBase&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template bool JacobiRotation::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z) { using std::sqrt; using std::abs; typedef typename NumTraits::Real RealScalar; if(y == Scalar(0)) { m_c = Scalar(1); m_s = Scalar(0); return false; } else { RealScalar tau = (x-z)/(RealScalar(2)*abs(y)); RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1)); RealScalar t; if(tau>RealScalar(0)) { t = RealScalar(1) / (tau + w); } else { t = RealScalar(1) / (tau - w); } RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1); RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1)); m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n; m_c = n; return true; } } /** Makes \c *this as a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields * a diagonal matrix \f$ A = J^* B J \f$ * * Example: \include Jacobi_makeJacobi.cpp * Output: \verbinclude Jacobi_makeJacobi.out * * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template template inline bool JacobiRotation::makeJacobi(const MatrixBase& m, typename Derived::Index p, typename Derived::Index q) { return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q))); } /** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields: * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$. * * The value of \a z is returned if \a z is not null (the default is null). * Also note that G is built such that the cosine is always real. 
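 *
 * A minimal sketch zeroing the second entry of a 2-vector (mirrors the
 * included example):
 * \code
 * Eigen::Vector2f v(3.f, 4.f);
 * Eigen::JacobiRotation<float> G;
 * G.makeGivens(v.x(), v.y());
 * v.applyOnTheLeft(0, 1, G.adjoint()); // v becomes (r, 0) with |r| = v.norm()
 * \endcode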
* * Example: \include Jacobi_makeGivens.cpp * Output: \verbinclude Jacobi_makeGivens.out * * This function implements the continuous Givens rotation generation algorithm * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem. * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000. * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* z) { makeGivens(p, q, z, typename internal::conditional::IsComplex, internal::true_type, internal::false_type>::type()); } // specialization for complexes template void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type) { using std::sqrt; using std::abs; using numext::conj; if(q==Scalar(0)) { m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1); m_s = 0; if(r) *r = m_c * p; } else if(p==Scalar(0)) { m_c = 0; m_s = -q/abs(q); if(r) *r = abs(q); } else { RealScalar p1 = numext::norm1(p); RealScalar q1 = numext::norm1(q); if(p1>=q1) { Scalar ps = p / p1; RealScalar p2 = numext::abs2(ps); Scalar qs = q / p1; RealScalar q2 = numext::abs2(qs); RealScalar u = sqrt(RealScalar(1) + q2/p2); if(numext::real(p) void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type) { using std::sqrt; using std::abs; if(q==Scalar(0)) { m_c = p abs(q)) { Scalar t = q/p; Scalar u = sqrt(Scalar(1) + numext::abs2(t)); if(p void apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation& j); } /** \jacobi_module * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B, * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$. * * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane() */ template template inline void MatrixBase::applyOnTheLeft(Index p, Index q, const JacobiRotation& j) { RowXpr x(this->row(p)); RowXpr y(this->row(q)); internal::apply_rotation_in_the_plane(x, y, j); } /** \ingroup Jacobi_Module * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$. 
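 *
 * A sketch rotating two columns of a matrix (illustrative values):
 * \code
 * Eigen::MatrixXf A = Eigen::MatrixXf::Random(4,4);
 * Eigen::JacobiRotation<float> G(std::cos(0.5f), std::sin(0.5f));
 * A.applyOnTheRight(0, 1, G); // combines columns 0 and 1 of A
 * \endcode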
* * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane() */ template template inline void MatrixBase::applyOnTheRight(Index p, Index q, const JacobiRotation& j) { ColXpr x(this->col(p)); ColXpr y(this->col(q)); internal::apply_rotation_in_the_plane(x, y, j.transpose()); } namespace internal { template void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation& j) { typedef typename VectorX::Index Index; typedef typename VectorX::Scalar Scalar; enum { PacketSize = packet_traits::size }; typedef typename packet_traits::type Packet; eigen_assert(_x.size() == _y.size()); Index size = _x.size(); Index incrx = _x.innerStride(); Index incry = _y.innerStride(); Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0); Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0); OtherScalar c = j.c(); OtherScalar s = j.s(); if (c==OtherScalar(1) && s==OtherScalar(0)) return; /*** dynamic-size vectorized paths ***/ if(VectorX::SizeAtCompileTime == Dynamic && (VectorX::Flags & VectorY::Flags & PacketAccessBit) && ((incrx==1 && incry==1) || PacketSize == 1)) { // both vectors are sequentially stored in memory => vectorization enum { Peeling = 2 }; Index alignedStart = internal::first_aligned(y, size); Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; const Packet pc = pset1(c); const Packet ps = pset1(s); conj_helper::IsComplex,false> pcj; for(Index i=0; i(px); Packet yi = pload(py); pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi))); pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi))); px += PacketSize; py += PacketSize; } } else { Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize); for(Index i=alignedStart; i(px); Packet xi1 = ploadu(px+PacketSize); Packet yi = pload (py); Packet yi1 = pload (py+PacketSize); pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi))); pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1))); pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi))); pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1))); px += Peeling*PacketSize; py += Peeling*PacketSize; } if(alignedEnd!=peelingEnd) { Packet xi = ploadu(x+peelingEnd); Packet yi = pload (y+peelingEnd); pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi))); pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi))); } } for(Index i=alignedEnd; i(c); const Packet ps = pset1(s); conj_helper::IsComplex,false> pcj; Scalar* EIGEN_RESTRICT px = x; Scalar* EIGEN_RESTRICT py = y; for(Index i=0; i(px); Packet yi = pload(py); pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi))); pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi))); px += PacketSize; py += PacketSize; } } /*** non-vectorized path ***/ else { for(Index i=0; i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of sp_coletree.c file in SuperLU * -- SuperLU routine (version 3.1) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * August 1, 2008 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. 
* Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSE_COLETREE_H #define SPARSE_COLETREE_H namespace Eigen { namespace internal { /** Find the root of the tree/set containing the vertex i : Use Path halving */ template Index etree_find (Index i, IndexVector& pp) { Index p = pp(i); // Parent Index gp = pp(p); // Grand parent while (gp != p) { pp(i) = gp; // Parent pointer on find path is changed to former grand parent i = gp; p = pp(i); gp = pp(p); } return p; } /** Compute the column elimination tree of a sparse matrix * \param mat The matrix in column-major format. * \param parent The elimination tree * \param firstRowElt The column index of the first element in each row * \param perm The permutation to apply to the column of \b mat */ template int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0) { typedef typename MatrixType::Index Index; Index nc = mat.cols(); // Number of columns Index m = mat.rows(); Index diagSize = (std::min)(nc,m); IndexVector root(nc); // root of subtree of etree root.setZero(); IndexVector pp(nc); // disjoint sets pp.setZero(); // Initialize disjoint sets parent.resize(mat.cols()); //Compute first nonzero column in each row Index row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); bool found_diag; for (col = 0; col < nc; col++) { Index pcol = col; if(perm) pcol = perm[col]; for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it) { row = it.row(); firstRowElt(row) = (std::min)(firstRowElt(row), col); } } /* Compute etree by Liu's algorithm for symmetric matrices, except use (firstRowElt[r],c) in place of an edge (r,c) of A. Thus each row clique in A'*A is replaced by a star centered at its first vertex, which has the same fill. */ Index rset, cset, rroot; for (col = 0; col < nc; col++) { found_diag = col>=m; pp(col) = col; cset = col; root(cset) = col; parent(col) = nc; /* The diagonal element is treated here even if it does not exist in the matrix * hence the loop is executed once more */ Index pcol = col; if(perm) pcol = perm[col]; for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it) { // A sequence of interleaved find and union is performed Index i = col; if(it) i = it.index(); if (i == col) found_diag = true; row = firstRowElt(i); if (row >= col) continue; rset = internal::etree_find(row, pp); // Find the name of the set containing row rroot = root(rset); if (rroot != col) { parent(rroot) = col; pp(cset) = rset; cset = rset; root(cset) = col; } } } return 0; } /** * Depth-first search from vertex n. No recursion. * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. 
*/ template void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum) { Index current = n, first, next; while (postnum != n) { // No kid for the current node first = first_kid(current); // no kid for the current node if (first == -1) { // Numbering this node because it has no kid post(current) = postnum++; // looking for the next kid next = next_kid(current); while (next == -1) { // No more kids : back to the parent node current = parent(current); // numbering the parent node post(current) = postnum++; // Get the next kid next = next_kid(current); } // stopping criterion if (postnum == n+1) return; // Updating current node current = next; } else { current = first; } } } /** * \brief Post order a tree * \param n the number of nodes * \param parent Input tree * \param post postordered tree */ template void treePostorder(Index n, IndexVector& parent, IndexVector& post) { IndexVector first_kid, next_kid; // Linked list of children Index postnum; // Allocate storage for working arrays and results first_kid.resize(n+1); next_kid.setZero(n+1); post.setZero(n+1); // Set up structure describing children Index v, dad; first_kid.setConstant(-1); for (v = n-1; v >= 0; v--) { dad = parent(v); next_kid(v) = first_kid(dad); first_kid(dad) = v; } // Depth-first search from dummy root vertex #n postnum = 0; internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum); } } // end namespace internal } // end namespace Eigen #endif // SPARSE_COLETREE_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h0000644000175000017500000001351113341130426024300 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
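// Usage sketch: the unary coefficient-wise expressions defined below operate
// on the stored nonzeros of a sparse matrix without densifying it. For
// example (assuming a populated matrix A):
//   Eigen::SparseMatrix<double> A(8,8);
//   // ... fill A ...
//   Eigen::SparseMatrix<double> B = A.cwiseAbs(); // |a_ij| on stored entries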
#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H #define EIGEN_SPARSE_CWISE_UNARY_OP_H namespace Eigen { template class CwiseUnaryOpImpl : public SparseMatrixBase > { public: class InnerIterator; class ReverseInnerIterator; typedef CwiseUnaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) protected: typedef typename internal::traits::_XprTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; }; template class CwiseUnaryOpImpl::InnerIterator : public CwiseUnaryOpImpl::MatrixTypeIterator { typedef typename CwiseUnaryOpImpl::Scalar Scalar; typedef typename CwiseUnaryOpImpl::MatrixTypeIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE InnerIterator& operator++() { Base::operator++(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } protected: const UnaryOp m_functor; private: typename CwiseUnaryOpImpl::Scalar& valueRef(); }; template class CwiseUnaryOpImpl::ReverseInnerIterator : public CwiseUnaryOpImpl::MatrixTypeReverseIterator { typedef typename CwiseUnaryOpImpl::Scalar Scalar; typedef typename CwiseUnaryOpImpl::MatrixTypeReverseIterator Base; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } protected: const UnaryOp m_functor; private: typename CwiseUnaryOpImpl::Scalar& valueRef(); }; template class CwiseUnaryViewImpl : public SparseMatrixBase > { public: class InnerIterator; class ReverseInnerIterator; typedef CwiseUnaryView Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) protected: typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; }; template class CwiseUnaryViewImpl::InnerIterator : public CwiseUnaryViewImpl::MatrixTypeIterator { typedef typename CwiseUnaryViewImpl::Scalar Scalar; typedef typename CwiseUnaryViewImpl::MatrixTypeIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE InnerIterator& operator++() { Base::operator++(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } protected: const ViewOp m_functor; }; template class CwiseUnaryViewImpl::ReverseInnerIterator : public CwiseUnaryViewImpl::MatrixTypeReverseIterator { typedef typename CwiseUnaryViewImpl::Scalar Scalar; typedef typename CwiseUnaryViewImpl::MatrixTypeReverseIterator Base; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) : 
Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } protected: const ViewOp m_functor; }; template EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator*=(const Scalar& other) { for (Index j=0; j EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator/=(const Scalar& other) { for (Index j=0; j // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_BLOCK_H #define EIGEN_SPARSE_BLOCK_H namespace Eigen { template class BlockImpl : public SparseMatrixBase > { typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; protected: enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) class InnerIterator: public XprType::InnerIterator { typedef typename BlockImpl::Index Index; public: inline InnerIterator(const BlockType& xpr, Index outer) : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; class ReverseInnerIterator: public XprType::ReverseInnerIterator { typedef typename BlockImpl::Index Index; public: inline ReverseInnerIterator(const BlockType& xpr, Index outer) : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline BlockImpl(const XprType& xpr, int i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) {} inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: typename XprType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) private: Index nonZeros() const; }; /*************************************************************************** * specialisation for SparseMatrix ***************************************************************************/ template class BlockImpl,BlockRows,BlockCols,true,Sparse> : public SparseMatrixBase,BlockRows,BlockCols,true> > { typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; typedef Block ConstBlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) protected: enum { OuterSize = IsRowMajor ? 
BlockRows : BlockCols }; public: class InnerIterator: public SparseMatrixType::InnerIterator { public: inline InnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator { public: inline ReverseInnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline BlockImpl(const SparseMatrixType& xpr, int i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) {} inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} template inline BlockType& operator=(const SparseMatrixBase& other) { typedef typename internal::remove_all::type _NestedMatrixType; _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);; // This assignement is slow if this vector set is not empty // and/or it is not at the end of the nonzeros of the underlying matrix. // 1 - eval to a temporary to avoid transposition and/or aliasing issues SparseMatrix tmp(other); // 2 - let's check whether there is enough allocated memory Index nnz = tmp.nonZeros(); Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending posiiton of the current block Index block_size = end - start; // available room in the current block Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; Index free_size = m_matrix.isCompressed() ? 
Index(matrix.data().allocatedSize()) + block_size : block_size; if(nnz>free_size) { // realloc manually to reduce copies typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz); std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar)); std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index)); std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar)); std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index)); std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar)); std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index)); newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz); matrix.data().swap(newdata); } else { // no need to realloc, simply copy the tail at its respective position and insert tmp matrix.data().resize(start + nnz + tail_size); std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar)); std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index)); std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar)); std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index)); } // update innerNonZeros if(!m_matrix.isCompressed()) for(Index j=0; j(other); } inline const Scalar* valuePtr() const { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline Scalar* valuePtr() { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* innerIndexPtr() const { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline Index* innerIndexPtr() { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* outerIndexPtr() const { return m_matrix.outerIndexPtr() + m_outerStart; } inline Index* outerIndexPtr() { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; } Index nonZeros() const { if(m_matrix.isCompressed()) return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); else if(m_outerSize.value()==0) return 0; else return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); eigen_assert(nonZeros()>0); if(m_matrix.isCompressed()) return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; else return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; }; template class BlockImpl,BlockRows,BlockCols,true,Sparse> : public SparseMatrixBase,BlockRows,BlockCols,true> > { typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) protected: enum { OuterSize = IsRowMajor ? 
BlockRows : BlockCols }; public: class InnerIterator: public SparseMatrixType::InnerIterator { public: inline InnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator { public: inline ReverseInnerIterator(const BlockType& xpr, Index outer) : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: Index m_outer; }; inline BlockImpl(const SparseMatrixType& xpr, int i) : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) {} inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} inline const Scalar* valuePtr() const { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* innerIndexPtr() const { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } inline const Index* outerIndexPtr() const { return m_matrix.outerIndexPtr() + m_outerStart; } Index nonZeros() const { if(m_matrix.isCompressed()) return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); else if(m_outerSize.value()==0) return 0; else return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); eigen_assert(nonZeros()>0); if(m_matrix.isCompressed()) return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; else return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; }; //---------- /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). */ template typename SparseMatrixBase::InnerVectorReturnType SparseMatrixBase::innerVector(Index outer) { return InnerVectorReturnType(derived(), outer); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). Read-only. */ template const typename SparseMatrixBase::ConstInnerVectorReturnType SparseMatrixBase::innerVector(Index outer) const { return ConstInnerVectorReturnType(derived(), outer); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). */ template Block SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) { return Block(derived(), IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). 
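 *
 * A minimal sketch (column-major case; illustrative values):
 * \code
 * Eigen::SparseMatrix<double> A(8,8);
 * double s = A.innerVectors(2, 3).sum(); // columns 2..4, without copying
 * \endcode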
Read-only. */ template const Block SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const { return Block(derived(), IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); } /** Generic implementation of sparse Block expression. * Real-only. */ template class BlockImpl : public SparseMatrixBase >, internal::no_assignment_operator { typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) /** Column or Row constructor */ inline BlockImpl(const XprType& xpr, int i) : m_matrix(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), m_blockRows(xpr.rows()), m_blockCols(xpr.cols()) {} /** Dynamic-size constructor */ inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {} inline int rows() const { return m_blockRows.value(); } inline int cols() const { return m_blockCols.value(); } inline Scalar& coeffRef(int row, int col) { return m_matrix.const_cast_derived() .coeffRef(row + m_startRow.value(), col + m_startCol.value()); } inline const Scalar coeff(int row, int col) const { return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); } inline Scalar& coeffRef(int index) { return m_matrix.const_cast_derived() .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } inline const Scalar coeff(int index) const { return m_matrix .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } class InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename _MatrixTypeNested::InnerIterator Base; const BlockType& m_block; Index m_end; public: EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer) : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), m_block(block), m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) { while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) ) Base::operator++(); } inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } inline Index row() const { return Base::row() - m_block.m_startRow.value(); } inline Index col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } }; class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator { typedef typename _MatrixTypeNested::ReverseInnerIterator Base; const BlockType& m_block; Index m_begin; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer) : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? 
block.m_startRow.value() : block.m_startCol.value())), m_block(block), m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) { while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) ) Base::operator--(); } inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } inline Index row() const { return Base::row() - m_block.m_startRow.value(); } inline Index col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; } }; protected: friend class InnerIterator; friend class ReverseInnerIterator; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) typename XprType::Nested m_matrix; const internal::variable_if_dynamic m_startRow; const internal::variable_if_dynamic m_startCol; const internal::variable_if_dynamic m_blockRows; const internal::variable_if_dynamic m_blockCols; }; } // end namespace Eigen #endif // EIGEN_SPARSE_BLOCK_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseVector.h0000644000175000017500000003244213341130426023156 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEVECTOR_H #define EIGEN_SPARSEVECTOR_H namespace Eigen { /** \ingroup SparseCore_Module * \class SparseVector * * \brief a sparse vector class * * \tparam _Scalar the scalar type, i.e. the type of the coefficients * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN. */ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _Index Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { IsColVector = (_Options & RowMajorBit) ? 0 : 1, RowsAtCompileTime = IsColVector ? Dynamic : 1, ColsAtCompileTime = IsColVector ? 1 : Dynamic, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit), CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = InnerRandomAccessPattern }; }; // Sparse-Vector-Assignment kinds: enum { SVA_RuntimeSwitch, SVA_Inner, SVA_Outer }; template< typename Dest, typename Src, int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch : Src::InnerSizeAtCompileTime==1 ? SVA_Outer : SVA_Inner> struct sparse_vector_assign_selector; } template class SparseVector : public SparseMatrixBase > { typedef SparseMatrixBase SparseBase; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) typedef internal::CompressedStorage Storage; enum { IsColVector = internal::traits::IsColVector }; enum { Options = _Options }; EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? 
m_size : 1; } EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } EIGEN_STRONG_INLINE Index outerSize() const { return 1; } EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); } EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); } EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); } /** \internal */ inline Storage& data() { return m_data; } /** \internal */ inline const Storage& data() const { return m_data; } inline Scalar coeff(Index row, Index col) const { eigen_assert(IsColVector ? (col==0 && row>=0 && row=0 && col=0 && i=0 && row=0 && col=0 && i(m_data.size()); } inline void startVec(Index outer) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } inline Scalar& insertBackByOuterInner(Index outer, Index inner) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); return insertBack(inner); } inline Scalar& insertBack(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } inline Scalar& insert(Index row, Index col) { eigen_assert(IsColVector ? (col==0 && row>=0 && row=0 && col=0 && i= startId) && (m_data.index(p) > i) ) { m_data.index(p+1) = m_data.index(p); m_data.value(p+1) = m_data.value(p); --p; } m_data.index(p+1) = i; m_data.value(p+1) = 0; return m_data.value(p+1); } /** */ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); } inline void finalize() {} void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) { m_data.prune(reference,epsilon); } void resize(Index rows, Index cols) { eigen_assert(rows==1 || cols==1); resize(IsColVector ? rows : cols); } void resize(Index newSize) { m_size = newSize; m_data.clear(); } void resizeNonZeros(Index size) { m_data.resize(size); } inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); } inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); } inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); } template inline SparseVector(const SparseMatrixBase& other) : m_size(0) { check_template_parameters(); *this = other.derived(); } inline SparseVector(const SparseVector& other) : SparseBase(other), m_size(0) { check_template_parameters(); *this = other.derived(); } /** Swaps the values of \c *this and \a other. * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap() */ inline void swap(SparseVector& other) { std::swap(m_size, other.m_size); m_data.swap(other.m_data); } inline SparseVector& operator=(const SparseVector& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.size()); m_data = other.m_data; } return *this; } template inline SparseVector& operator=(const SparseMatrixBase& other) { SparseVector tmp(other.size()); internal::sparse_vector_assign_selector::run(tmp,other.derived()); this->swap(tmp); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template inline SparseVector& operator=(const SparseSparseProduct& product) { return Base::operator=(product); } #endif friend std::ostream & operator << (std::ostream & s, const SparseVector& m) { for (Index i=0; i::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } Storage m_data; Index m_size; }; template class SparseVector::InnerIterator { public: InnerIterator(const SparseVector& vec, Index outer=0) : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } InnerIterator(const internal::CompressedStorage& data) : m_data(data), m_id(0), m_end(static_cast(m_data.size())) {} inline InnerIterator& operator++() { m_id++; return *this; } inline Scalar value() const { return m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } inline Index index() const { return m_data.index(m_id); } inline Index row() const { return IsColVector ? index() : 0; } inline Index col() const { return IsColVector ? 0 : index(); } inline operator bool() const { return (m_id < m_end); } protected: const internal::CompressedStorage& m_data; Index m_id; const Index m_end; }; template class SparseVector::ReverseInnerIterator { public: ReverseInnerIterator(const SparseVector& vec, Index outer=0) : m_data(vec.m_data), m_id(static_cast(m_data.size())), m_start(0) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } ReverseInnerIterator(const internal::CompressedStorage& data) : m_data(data), m_id(static_cast(m_data.size())), m_start(0) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } inline Scalar value() const { return m_data.value(m_id-1); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id-1)); } inline Index index() const { return m_data.index(m_id-1); } inline Index row() const { return IsColVector ? index() : 0; } inline Index col() const { return IsColVector ? 
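// ---------------------------------------------------------------------------
// Usage sketch for SparseVector (illustrative, assuming the public API
// implemented in this file): insert() performs sorted insertion, coeffRef()
// creates a zero coefficient on demand, and InnerIterator walks the stored
// non-zeros in increasing index order.
//
//   #include <Eigen/Sparse>
//   #include <iostream>
//
//   int main() {
//     Eigen::SparseVector<double> v(10);
//     v.insert(2) = 1.5;      // new coefficient at index 2
//     v.coeffRef(7) += 2.0;   // created as 0, then incremented
//     for (Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
//       std::cout << "v(" << it.index() << ") = " << it.value() << "\n";
//     std::cout << "nnz = " << v.nonZeros() << "\n";
//   }
// ---------------------------------------------------------------------------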
0 : index(); } inline operator bool() const { return (m_id > m_start); } protected: const internal::CompressedStorage& m_data; Index m_id; const Index m_start; }; namespace internal { template< typename Dest, typename Src> struct sparse_vector_assign_selector { static void run(Dest& dst, const Src& src) { eigen_internal_assert(src.innerSize()==src.size()); for(typename Src::InnerIterator it(src, 0); it; ++it) dst.insert(it.index()) = it.value(); } }; template< typename Dest, typename Src> struct sparse_vector_assign_selector { static void run(Dest& dst, const Src& src) { eigen_internal_assert(src.outerSize()==src.size()); for(typename Dest::Index i=0; i struct sparse_vector_assign_selector { static void run(Dest& dst, const Src& src) { if(src.outerSize()==1) sparse_vector_assign_selector::run(dst, src); else sparse_vector_assign_selector::run(dst, src); } }; } } // end namespace Eigen #endif // EIGEN_SPARSEVECTOR_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h0000644000175000017500000004403613341130426024753 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H #define EIGEN_SPARSE_SELFADJOINTVIEW_H namespace Eigen { /** \ingroup SparseCore_Module * \class SparseSelfAdjointView * * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix. * * \param MatrixType the type of the dense matrix storing the coefficients * \param UpLo can be either \c #Lower or \c #Upper * * This class is an expression of a selfadjoint matrix from a triangular part of a matrix * with given dense storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView() * and most of the time this is the only way that it is used. * * \sa SparseMatrixBase::selfadjointView() */ template class SparseSelfAdjointTimeDenseProduct; template class DenseTimeSparseSelfAdjointProduct; namespace internal { template struct traits > : traits { }; template void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); template void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); } template class SparseSelfAdjointView : public EigenBase > { public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix) { eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices"); } inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ const _MatrixTypeNested& matrix() const { return m_matrix; } _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); } /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs. * * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. */ template SparseSparseProduct operator*(const SparseMatrixBase& rhs) const { return SparseSparseProduct(*this, rhs.derived()); } /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. * * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. */ template friend SparseSparseProduct operator*(const SparseMatrixBase& lhs, const SparseSelfAdjointView& rhs) { return SparseSparseProduct(lhs.derived(), rhs); } /** Efficient sparse self-adjoint matrix times dense vector/matrix product */ template SparseSelfAdjointTimeDenseProduct operator*(const MatrixBase& rhs) const { return SparseSelfAdjointTimeDenseProduct(m_matrix, rhs.derived()); } /** Efficient dense vector/matrix times sparse self-adjoint matrix product */ template friend DenseTimeSparseSelfAdjointProduct operator*(const MatrixBase& lhs, const SparseSelfAdjointView& rhs) { return DenseTimeSparseSelfAdjointProduct(lhs.derived(), rhs.m_matrix); } /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix. * * \returns a reference to \c *this * * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply * call this function with u.adjoint(). */ template SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, const Scalar& alpha = Scalar(1)); /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ template void evalTo(SparseMatrix& _dest) const { internal::permute_symm_to_fullsymm(m_matrix, _dest); } template void evalTo(DynamicSparseMatrix& _dest) const { // TODO directly evaluate into _dest; SparseMatrix tmp(_dest.rows(),_dest.cols()); internal::permute_symm_to_fullsymm(m_matrix, tmp); _dest = tmp; } /** \returns an expression of P H P^-1 */ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm); } template SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct& permutedMatrix) { permutedMatrix.evalTo(*this); return *this; } SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) { PermutationMatrix pnull; return *this = src.twistedBy(pnull); } template SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) { PermutationMatrix pnull; return *this = src.twistedBy(pnull); } // const SparseLLT llt() const; // const SparseLDLT ldlt() const; protected: typename MatrixType::Nested m_matrix; mutable VectorI m_countPerRow; mutable VectorI m_countPerCol; }; /*************************************************************************** * Implementation of SparseMatrixBase methods ***************************************************************************/ template template const SparseSelfAdjointView SparseMatrixBase::selfadjointView() const { return derived(); } template template SparseSelfAdjointView SparseMatrixBase::selfadjointView() { return derived(); } /*************************************************************************** * Implementation of SparseSelfAdjointView methods ***************************************************************************/ template template SparseSelfAdjointView& 
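// ---------------------------------------------------------------------------
// Usage sketch for the selfadjoint-view products declared above (illustrative,
// assuming the Eigen 3.x public API of this file): only one triangle of the
// symmetric matrix is stored, and selfadjointView() exposes it as a full
// symmetric operator.
//
//   #include <Eigen/Dense>
//   #include <Eigen/Sparse>
//
//   int main() {
//     typedef Eigen::SparseMatrix<double> SpMat;
//     SpMat L(3, 3);              // store the lower triangle only
//     L.insert(0, 0) = 4.0;
//     L.insert(1, 0) = 1.0;
//     L.insert(2, 2) = 2.0;
//     Eigen::VectorXd x = Eigen::VectorXd::Ones(3);
//     Eigen::VectorXd y = L.selfadjointView<Eigen::Lower>() * x;  // symmetric mat-vec
//     SpMat full(L.selfadjointView<Eigen::Lower>());  // expand to full symmetric storage
//   }
// ---------------------------------------------------------------------------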
SparseSelfAdjointView::rankUpdate(const SparseMatrixBase& u, const Scalar& alpha) { SparseMatrix tmp = u * u.adjoint(); if(alpha==Scalar(0)) m_matrix.const_cast_derived() = tmp.template triangularView(); else m_matrix.const_cast_derived() += alpha * tmp.template triangularView(); return *this; } /*************************************************************************** * Implementation of sparse self-adjoint time dense matrix ***************************************************************************/ namespace internal { template struct traits > : traits, Lhs, Rhs> > { typedef Dense StorageKind; }; } template class SparseSelfAdjointTimeDenseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct) SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { EIGEN_ONLY_USED_FOR_DEBUG(alpha); // TODO use alpha eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry"); typedef typename internal::remove_all::type _Lhs; typedef typename _Lhs::InnerIterator LhsInnerIterator; enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit, ProcessFirstHalf = ((UpLo&(Upper|Lower))==(Upper|Lower)) || ( (UpLo&Upper) && !LhsIsRowMajor) || ( (UpLo&Lower) && LhsIsRowMajor), ProcessSecondHalf = !ProcessFirstHalf }; for (Index j=0; j struct traits > : traits, Lhs, Rhs> > {}; } template class DenseTimeSparseSelfAdjointProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct) DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const { // TODO } private: DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&); }; /*************************************************************************** * Implementation of symmetric copies and permutations ***************************************************************************/ namespace internal { template struct traits > : traits { }; template void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) { typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef SparseMatrix Dest; typedef Matrix VectorI; Dest& dest(_dest.derived()); enum { StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor) }; Index size = mat.rows(); VectorI count; count.resize(size); count.setZero(); dest.resize(size,size); for(Index j = 0; jc) || ( UpLo==Upper && rc) || ( (UpLo&Upper)==Upper && r void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) { typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; SparseMatrix& dest(_dest.derived()); typedef Matrix VectorI; enum { SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor, StorageOrderMatch = int(SrcOrder) == int(DstOrder), DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo, SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo }; Index size = mat.rows(); VectorI count(size); count.setZero(); dest.resize(size,size); for(Index j = 0; jj)) continue; Index ip = perm ? perm[i] : i; count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; } } dest.outerIndexPtr()[0] = 0; for(Index j=0; jj)) continue; Index jp = perm ? 
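// ---------------------------------------------------------------------------
// Usage sketch for the symmetric permutation helpers above (illustrative,
// assuming the Eigen 3.x API of this file). twistedBy(P) computes P*A*P^-1 on
// the self-adjoint view; this is how the sparse Cholesky solvers apply
// fill-reducing orderings.
//
//   #include <Eigen/Sparse>
//
//   int main() {
//     typedef Eigen::SparseMatrix<double> SpMat;
//     SpMat A(3, 3);
//     A.insert(0, 0) = 1.0;
//     A.insert(1, 0) = 2.0;
//     A.insert(2, 2) = 3.0;
//     Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(3);
//     P.setIdentity();   // a real ordering (e.g. AMD) would go here
//     SpMat B(3, 3);
//     B.selfadjointView<Eigen::Lower>() =
//         A.selfadjointView<Eigen::Lower>().twistedBy(P);
//   }
// ---------------------------------------------------------------------------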
perm[j] : j; Index ip = perm? perm[i] : i; Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp); if(!StorageOrderMatch) std::swap(ip,jp); if( ((int(DstUpLo)==int(Lower) && ipjp))) dest.valuePtr()[k] = numext::conj(it.value()); else dest.valuePtr()[k] = it.value(); } } } } template class SparseSymmetricPermutationProduct : public EigenBase > { public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; protected: typedef PermutationMatrix Perm; public: typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm) : m_matrix(mat), m_perm(perm) {} inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } template void evalTo(SparseMatrix& _dest) const { // internal::permute_symm_to_fullsymm(m_matrix,_dest,m_perm.indices().data()); SparseMatrix tmp; internal::permute_symm_to_fullsymm(m_matrix,tmp,m_perm.indices().data()); _dest = tmp; } template void evalTo(SparseSelfAdjointView& dest) const { internal::permute_symm_to_symm(m_matrix,dest.matrix(),m_perm.indices().data()); } protected: MatrixTypeNested m_matrix; const Perm& m_perm; }; } // end namespace Eigen #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseFuzzy.h0000644000175000017500000000173013341130426023037 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_FUZZY_H #define EIGEN_SPARSE_FUZZY_H // template // template // bool SparseMatrixBase::isApprox( // const OtherDerived& other, // typename NumTraits::Real prec // ) const // { // const typename internal::nested::type nested(derived()); // const typename internal::nested::type otherNested(other.derived()); // return (nested - otherNested).cwise().abs2().sum() // <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); // } #endif // EIGEN_SPARSE_FUZZY_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseView.h0000644000175000017500000000535713341130426022633 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2010 Daniel Lowengrub // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
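// ---------------------------------------------------------------------------
// Usage sketch for the sparseView() facility implemented in the file below
// (illustrative, assuming the Eigen 3.x public API): it wraps a dense
// expression and skips every coefficient that is "much smaller" than a
// reference value.
//
//   #include <Eigen/Dense>
//   #include <Eigen/Sparse>
//
//   int main() {
//     Eigen::MatrixXd D(2, 2);
//     D << 1.0, 1e-18,
//          0.0, 2.0;
//     Eigen::SparseMatrix<double> S = D.sparseView();          // default threshold
//     Eigen::SparseMatrix<double> T = D.sparseView(1.0, 1e-9); // reference, epsilon
//   }
// ---------------------------------------------------------------------------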
#ifndef EIGEN_SPARSEVIEW_H #define EIGEN_SPARSEVIEW_H namespace Eigen { namespace internal { template struct traits > : traits { typedef typename MatrixType::Index Index; typedef Sparse StorageKind; enum { Flags = int(traits::Flags) & (RowMajorBit) }; }; } // end namespace internal template class SparseView : public SparseMatrixBase > { typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView) SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0), typename NumTraits::Real m_epsilon = NumTraits::dummy_precision()) : m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} class InnerIterator; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } inline Index innerSize() const { return m_matrix.innerSize(); } inline Index outerSize() const { return m_matrix.outerSize(); } protected: MatrixTypeNested m_matrix; Scalar m_reference; typename NumTraits::Real m_epsilon; }; template class SparseView::InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename SparseView::Index Index; public: typedef typename _MatrixTypeNested::InnerIterator IterBase; InnerIterator(const SparseView& view, Index outer) : IterBase(view.m_matrix, outer), m_view(view) { incrementToNonZero(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { IterBase::operator++(); incrementToNonZero(); return *this; } using IterBase::value; protected: const SparseView& m_view; private: void incrementToNonZero() { while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon)) { IterBase::operator++(); } } }; template const SparseView MatrixBase::sparseView(const Scalar& m_reference, const typename NumTraits::Real& m_epsilon) const { return SparseView(derived(), m_reference, m_epsilon); } } // end namespace Eigen #endif antimony/lib/fab/vendor/Eigen/src/SparseCore/AmbiVector.h0000644000175000017500000002346313341130426022574 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AMBIVECTOR_H #define EIGEN_AMBIVECTOR_H namespace Eigen { namespace internal { /** \internal * Hybrid sparse/dense vector class designed for intensive read-write operations. * * See BasicSparseLLT and SparseProduct for usage examples. 
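// Illustrative sketch of the read-write cycle this class supports (editorial
// example under the assumption that the internal header is reachable through
// <Eigen/Sparse>; this is internal API, not a stable public interface):
//
//   #include <Eigen/Sparse>
//
//   int main() {
//     Eigen::internal::AmbiVector<double, int> w(100);
//     w.init(0.05);            // density below 10% selects sparse (linked-list) mode
//     w.setZero();
//     w.coeffRef(4) = 1.0;     // indices must not decrease between restart() calls
//     w.restart();
//     w.coeffRef(2) = 2.0;
//     double sum = 0;
//     for (Eigen::internal::AmbiVector<double, int>::Iterator it(w); it; ++it)
//       sum += it.value();     // iterates the pruned non-zeros
//   }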
*/ template class AmbiVector { public: typedef _Scalar Scalar; typedef _Index Index; typedef typename NumTraits::Real RealScalar; AmbiVector(Index size) : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) { resize(size); } void init(double estimatedDensity); void init(int mode); Index nonZeros() const; /** Specifies a sub-vector to work on */ void setBounds(Index start, Index end) { m_start = start; m_end = end; } void setZero(); void restart(); Scalar& coeffRef(Index i); Scalar& coeff(Index i); class Iterator; ~AmbiVector() { delete[] m_buffer; } void resize(Index size) { if (m_allocatedSize < size) reallocate(size); m_size = size; } Index size() const { return m_size; } protected: void reallocate(Index size) { // if the size of the matrix is not too large, let's allocate a bit more than needed such // that we can handle dense vector even in sparse mode. delete[] m_buffer; if (size<1000) { Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar); m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); m_buffer = new Scalar[allocSize]; } else { m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl); m_buffer = new Scalar[size]; } m_size = size; m_start = 0; m_end = m_size; } void reallocateSparse() { Index copyElements = m_allocatedElements; m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size); Index allocSize = m_allocatedElements * sizeof(ListEl); allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar); Scalar* newBuffer = new Scalar[allocSize]; memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); delete[] m_buffer; m_buffer = newBuffer; } protected: // element type of the linked list struct ListEl { Index next; Index index; Scalar value; }; // used to store data in both mode Scalar* m_buffer; Scalar m_zero; Index m_size; Index m_start; Index m_end; Index m_allocatedSize; Index m_allocatedElements; Index m_mode; // linked list mode Index m_llStart; Index m_llCurrent; Index m_llSize; }; /** \returns the number of non zeros in the current sub vector */ template _Index AmbiVector<_Scalar,_Index>::nonZeros() const { if (m_mode==IsSparse) return m_llSize; else return m_end - m_start; } template void AmbiVector<_Scalar,_Index>::init(double estimatedDensity) { if (estimatedDensity>0.1) init(IsDense); else init(IsSparse); } template void AmbiVector<_Scalar,_Index>::init(int mode) { m_mode = mode; if (m_mode==IsSparse) { m_llSize = 0; m_llStart = -1; } } /** Must be called whenever we might perform a write access * with an index smaller than the previous one. * * Don't worry, this function is extremely cheap. 
*/ template void AmbiVector<_Scalar,_Index>::restart() { m_llCurrent = m_llStart; } /** Set all coefficients of current subvector to zero */ template void AmbiVector<_Scalar,_Index>::setZero() { if (m_mode==IsDense) { for (Index i=m_start; i _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) { if (m_mode==IsDense) return m_buffer[i]; else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); // TODO factorize the following code to reduce code generation eigen_assert(m_mode==IsSparse); if (m_llSize==0) { // this is the first element m_llStart = 0; m_llCurrent = 0; ++m_llSize; llElements[0].value = Scalar(0); llElements[0].index = i; llElements[0].next = -1; return llElements[0].value; } else if (i=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); while (nextel >= 0 && llElements[nextel].index<=i) { m_llCurrent = nextel; nextel = llElements[nextel].next; } if (llElements[m_llCurrent].index==i) { // the coefficient already exists and we found it ! return llElements[m_llCurrent].value; } else { if (m_llSize>=m_allocatedElements) { reallocateSparse(); llElements = reinterpret_cast(m_buffer); } eigen_internal_assert(m_llSize _Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i) { if (m_mode==IsDense) return m_buffer[i]; else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); eigen_assert(m_mode==IsSparse); if ((m_llSize==0) || (i= 0 && llElements[elid].index class AmbiVector<_Scalar,_Index>::Iterator { public: typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; /** Default constructor * \param vec the vector on which we iterate * \param epsilon the minimal value used to prune zero coefficients. * In practice, all coefficients having a magnitude smaller than \a epsilon * are skipped. */ Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0) : m_vector(vec) { using std::abs; m_epsilon = epsilon; m_isDense = m_vector.m_mode==IsDense; if (m_isDense) { m_currentEl = 0; // this is to avoid a compilation warning m_cachedValue = 0; // this is to avoid a compilation warning m_cachedIndex = m_vector.m_start-1; ++(*this); } else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); m_currentEl = m_vector.m_llStart; while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon) m_currentEl = llElements[m_currentEl].next; if (m_currentEl<0) { m_cachedValue = 0; // this is to avoid a compilation warning m_cachedIndex = -1; } else { m_cachedIndex = llElements[m_currentEl].index; m_cachedValue = llElements[m_currentEl].value; } } } Index index() const { return m_cachedIndex; } Scalar value() const { return m_cachedValue; } operator bool() const { return m_cachedIndex>=0; } Iterator& operator++() { using std::abs; if (m_isDense) { do { ++m_cachedIndex; } while (m_cachedIndex(m_vector.m_buffer); do { m_currentEl = llElements[m_currentEl].next; } while (m_currentEl>=0 && abs(llElements[m_currentEl].value) // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
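// ---------------------------------------------------------------------------
// Usage sketch for the sparse triangular solver implemented in the file below
// (illustrative, assuming the Eigen 3.x public API triangularView()/solve()):
//
//   #include <Eigen/Dense>
//   #include <Eigen/Sparse>
//
//   int main() {
//     Eigen::SparseMatrix<double> L(3, 3);
//     L.insert(0, 0) = 2.0;
//     L.insert(1, 0) = 1.0;
//     L.insert(1, 1) = 3.0;
//     L.insert(2, 2) = 1.0;
//     L.makeCompressed();
//     Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
//     Eigen::VectorXd x = L.triangularView<Eigen::Lower>().solve(b);
//     L.triangularView<Eigen::Lower>().solveInPlace(b);  // overwrites b with x
//   }
// ---------------------------------------------------------------------------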
#ifndef EIGEN_SPARSETRIANGULARSOLVER_H #define EIGEN_SPARSETRIANGULARSOLVER_H namespace Eigen { namespace internal { template::Flags) & RowMajorBit> struct sparse_solve_triangular_selector; // forward substitution, row-major template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col=0 ; --i) { Scalar tmp = other.coeff(i,col); Scalar l_ii = 0; typename Lhs::InnerIterator it(lhs, i); while(it && it.index() struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) { for(int col=0 ; col=0; --i) { Scalar& tmp = other.coeffRef(i,col); if (tmp!=Scalar(0)) // optimization when other is actually sparse { if(!(Mode & UnitDiag)) { // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements typename Lhs::ReverseInnerIterator it(lhs, i); while(it && it.index()!=i) --it; eigen_assert(it && it.index()==i); other.coeffRef(i,col) /= it.value(); } typename Lhs::InnerIterator it(lhs, i); for(; it && it.index() template void SparseTriangularView::solveInPlace(MatrixBase& other) const { eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows()); eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); enum { copy = internal::traits::Flags & RowMajorBit }; typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; OtherCopy otherCopy(other.derived()); internal::sparse_solve_triangular_selector::type, Mode>::run(m_matrix, otherCopy); if (copy) other = otherCopy; } template template typename internal::plain_matrix_type_column_major::type SparseTriangularView::solve(const MatrixBase& other) const { typename internal::plain_matrix_type_column_major::type res(other); solveInPlace(res); return res; } // pure sparse path namespace internal { template struct sparse_solve_triangular_sparse_selector; // forward substitution, col-major template struct sparse_solve_triangular_sparse_selector { typedef typename Rhs::Scalar Scalar; typedef typename promote_index_type::Index, typename traits::Index>::type Index; static void run(const Lhs& lhs, Rhs& other) { const bool IsLower = (UpLo==Lower); AmbiVector tempVector(other.rows()*2); tempVector.setBounds(0,other.rows()); Rhs res(other.rows(), other.cols()); res.reserve(other.nonZeros()); for(int col=0 ; col=0; i+=IsLower?1:-1) { tempVector.restart(); Scalar& ci = tempVector.coeffRef(i); if (ci!=Scalar(0)) { // find typename Lhs::InnerIterator it(lhs, i); if(!(Mode & UnitDiag)) { if (IsLower) { eigen_assert(it.index()==i); ci /= it.value(); } else ci /= lhs.coeff(i,i); } tempVector.restart(); if (IsLower) { if (it.index()==i) ++it; for(; it; ++it) tempVector.coeffRef(it.index()) -= ci * it.value(); } else { for(; it && it.index()::Iterator it(tempVector/*,1e-12*/); it; ++it) { ++ count; // std::cerr << "fill " << it.index() << ", " << col << "\n"; // std::cout << it.value() << " "; // FIXME use insertBack res.insert(it.index(), col) = it.value(); } // std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n"; } res.finalize(); other = res.markAsRValue(); } }; } // end namespace internal template template void 
SparseTriangularView::solveInPlace(SparseMatrixBase& other) const { eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows()); eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); // enum { copy = internal::traits::Flags & RowMajorBit }; // typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; // OtherCopy otherCopy(other.derived()); internal::sparse_solve_triangular_sparse_selector::run(m_matrix, other.derived()); // if (copy) // other = otherCopy; } #ifdef EIGEN2_SUPPORT // deprecated stuff: /** \deprecated */ template template void SparseMatrixBase::solveTriangularInPlace(MatrixBase& other) const { this->template triangular().solveInPlace(other); } /** \deprecated */ template template typename internal::plain_matrix_type_column_major::type SparseMatrixBase::solveTriangular(const MatrixBase& other) const { typename internal::plain_matrix_type_column_major::type res(other); derived().solveTriangularInPlace(res); return res; } #endif // EIGEN2_SUPPORT } // end namespace Eigen #endif // EIGEN_SPARSETRIANGULARSOLVER_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseDot.h0000644000175000017500000000613413341130426022441 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_DOT_H #define EIGEN_SPARSE_DOT_H namespace Eigen { template template typename internal::traits::Scalar SparseMatrixBase::dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); eigen_assert(other.size()>0 && "you are using a non initialized vector"); typename Derived::InnerIterator i(derived(),0); Scalar res(0); while (i) { res += numext::conj(i.value()) * other.coeff(i.index()); ++i; } return res; } template template typename internal::traits::Scalar SparseMatrixBase::dot(const SparseMatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); typedef typename Derived::Nested Nested; typedef typename OtherDerived::Nested OtherNested; typedef typename internal::remove_all::type NestedCleaned; typedef typename internal::remove_all::type OtherNestedCleaned; Nested nthis(derived()); OtherNested nother(other.derived()); typename NestedCleaned::InnerIterator i(nthis,0); typename OtherNestedCleaned::InnerIterator j(nother,0); Scalar res(0); while (i && j) { if (i.index()==j.index()) { res += numext::conj(i.value()) * j.value(); ++i; ++j; } else if (i.index() inline typename NumTraits::Scalar>::Real SparseMatrixBase::squaredNorm() const { return numext::real((*this).cwiseAbs2().sum()); } template inline typename NumTraits::Scalar>::Real SparseMatrixBase::norm() const { using std::sqrt; return sqrt(squaredNorm()); } template 
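// ---------------------------------------------------------------------------
// Usage sketch for the sparse dot products and norms above (illustrative,
// assuming the Eigen 3.x public API). The sparse-sparse overload merges the
// two sorted index lists, so only matching indices contribute:
//
//   #include <Eigen/Sparse>
//   #include <iostream>
//
//   int main() {
//     Eigen::SparseVector<double> u(4), v(4);
//     u.insert(0) = 1.0; u.insert(2) = 2.0;
//     v.insert(2) = 3.0; v.insert(3) = 4.0;
//     std::cout << u.dot(v) << "\n";         // 6 : only index 2 matches
//     std::cout << u.squaredNorm() << "\n";  // 5
//     std::cout << u.norm() << "\n";         // sqrt(5)
//   }
// ---------------------------------------------------------------------------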
inline typename NumTraits::Scalar>::Real SparseMatrixBase::blueNorm() const { return internal::blueNorm_impl(*this); } } // end namespace Eigen #endif // EIGEN_SPARSE_DOT_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseDenseProduct.h0000644000175000017500000002533513341130426024316 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEDENSEPRODUCT_H #define EIGEN_SPARSEDENSEPRODUCT_H namespace Eigen { template struct SparseDenseProductReturnType { typedef SparseTimeDenseProduct Type; }; template struct SparseDenseProductReturnType { typedef typename internal::conditional< Lhs::IsRowMajor, SparseDenseOuterProduct, SparseDenseOuterProduct >::type Type; }; template struct DenseSparseProductReturnType { typedef DenseTimeSparseProduct Type; }; template struct DenseSparseProductReturnType { typedef typename internal::conditional< Rhs::IsRowMajor, SparseDenseOuterProduct, SparseDenseOuterProduct >::type Type; }; namespace internal { template struct traits > { typedef Sparse StorageKind; typedef typename scalar_product_traits::Scalar, typename traits::Scalar>::ReturnType Scalar; typedef typename Lhs::Index Index; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename remove_all::type _LhsNested; typedef typename remove_all::type _RhsNested; enum { LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost, RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost, RowsAtCompileTime = Tr ? int(traits::RowsAtCompileTime) : int(traits::RowsAtCompileTime), ColsAtCompileTime = Tr ? int(traits::ColsAtCompileTime) : int(traits::ColsAtCompileTime), MaxRowsAtCompileTime = Tr ? int(traits::MaxRowsAtCompileTime) : int(traits::MaxRowsAtCompileTime), MaxColsAtCompileTime = Tr ? int(traits::MaxColsAtCompileTime) : int(traits::MaxColsAtCompileTime), Flags = Tr ? RowMajorBit : 0, CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits::MulCost }; }; } // end namespace internal template class SparseDenseOuterProduct : public SparseMatrixBase > { public: typedef SparseMatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct) typedef internal::traits Traits; private: typedef typename Traits::LhsNested LhsNested; typedef typename Traits::RhsNested RhsNested; typedef typename Traits::_LhsNested _LhsNested; typedef typename Traits::_RhsNested _RhsNested; public: class InnerIterator; EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE); } EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs) : m_lhs(lhs), m_rhs(rhs) { EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE); } EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return Tr ? 
m_lhs.cols() : m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; template class SparseDenseOuterProduct::InnerIterator : public _LhsNested::InnerIterator { typedef typename _LhsNested::InnerIterator Base; typedef typename SparseDenseOuterProduct::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer) : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits::StorageKind() )) { } inline Index outer() const { return m_outer; } inline Index row() const { return Transpose ? m_outer : Base::index(); } inline Index col() const { return Transpose ? Base::index() : m_outer; } inline Scalar value() const { return Base::value() * m_factor; } protected: static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) { return rhs.coeff(outer); } static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse()) { typename Traits::_RhsNested::InnerIterator it(rhs, outer); if (it && it.index()==0) return it.value(); return Scalar(0); } Index m_outer; Scalar m_factor; }; namespace internal { template struct traits > : traits, Lhs, Rhs> > { typedef Dense StorageKind; typedef MatrixXpr XprKind; }; template struct sparse_time_dense_product_impl; template struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::Index Index; typedef typename Lhs::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index c=0; c struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::InnerIterator LhsInnerIterator; typedef typename Lhs::Index Index; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index c=0; c struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::InnerIterator LhsInnerIterator; typedef typename Lhs::Index Index; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index j=0; j struct sparse_time_dense_product_impl { typedef typename internal::remove_all::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; typedef typename Lhs::InnerIterator LhsInnerIterator; typedef typename Lhs::Index Index; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { for(Index j=0; j inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { sparse_time_dense_product_impl::run(lhs, rhs, res, alpha); } } // end namespace internal template class SparseTimeDenseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct) SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const 
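// ---------------------------------------------------------------------------
// Usage sketch for the sparse-dense products above (illustrative, assuming
// the Eigen 3.x public API); operator* dispatches to SparseTimeDenseProduct
// or DenseTimeSparseProduct depending on operand order:
//
//   #include <Eigen/Dense>
//   #include <Eigen/Sparse>
//
//   int main() {
//     Eigen::SparseMatrix<double> A(3, 2);
//     A.insert(0, 0) = 1.0;
//     A.insert(2, 1) = 2.0;
//     Eigen::VectorXd x(2); x << 1.0, 1.0;
//     Eigen::VectorXd y = A * x;                         // sparse * dense
//     Eigen::MatrixXd B = Eigen::MatrixXd::Identity(3, 3);
//     Eigen::MatrixXd C = B * A;                         // dense * sparse
//   }
// ---------------------------------------------------------------------------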
Scalar& alpha) const { internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha); } private: SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&); }; // dense = dense * sparse namespace internal { template struct traits > : traits, Lhs, Rhs> > { typedef Dense StorageKind; }; } // end namespace internal template class DenseTimeSparseProduct : public ProductBase, Lhs, Rhs> { public: EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct) DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { Transpose lhs_t(m_lhs); Transpose rhs_t(m_rhs); Transpose dest_t(dest); internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha); } private: DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&); }; } // end namespace Eigen #endif // EIGEN_SPARSEDENSEPRODUCT_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseMatrix.h0000644000175000017500000012764213341130426023167 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEMATRIX_H #define EIGEN_SPARSEMATRIX_H namespace Eigen { /** \ingroup SparseCore_Module * * \class SparseMatrix * * \brief A versatile sparse matrix representation * * This class implements a more versatile variant of the common \em compressed row/column storage format. * Each column's (resp. row's) non zeros are stored as a pair of value with associated row (resp. column) index. * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra * space in between the nonzeros of two successive columns (resp. rows) such that insertion of a new non-zero * can be done with limited memory reallocation and copies. * * A call to the function makeCompressed() turns the matrix into the standard \em compressed format * compatible with many libraries. * * More details on this storage scheme are given in the \ref TutorialSparse "manual pages". * * \tparam _Scalar the scalar type, i.e. the type of the coefficients * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility * is ColMajor or RowMajor. The default is 0 which means column-major. * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
*/ namespace internal { template struct traits > { typedef _Scalar Scalar; typedef _Index Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, Flags = _Options | NestByRefBit | LvalueBit, CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = InnerRandomAccessPattern }; }; template struct traits, DiagIndex> > { typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; typedef _Scalar Scalar; typedef Dense StorageKind; typedef _Index Index; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = 1, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = 1, Flags = 0, CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10 }; }; } // end namespace internal template class SparseMatrix : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=) typedef MappedSparseMatrix Map; using Base::IsRowMajor; typedef internal::CompressedStorage Storage; enum { Options = _Options }; protected: typedef SparseMatrix TransposedSparseMatrix; Index m_outerSize; Index m_innerSize; Index* m_outerIndex; Index* m_innerNonZeros; // optional, if null then the data is compressed Storage m_data; Eigen::Map > innerNonZeros() { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } const Eigen::Map > innerNonZeros() const { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } public: /** \returns whether \c *this is in compressed form. */ inline bool isCompressed() const { return m_innerNonZeros==0; } /** \returns the number of rows of the matrix */ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } /** \returns the number of columns of the matrix */ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ inline Index innerSize() const { return m_innerSize; } /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */ inline Index outerSize() const { return m_outerSize; } /** \returns a const pointer to the array of values. * This function is aimed at interoperability with other libraries. * \sa innerIndexPtr(), outerIndexPtr() */ inline const Scalar* valuePtr() const { return &m_data.value(0); } /** \returns a non-const pointer to the array of values. * This function is aimed at interoperability with other libraries. * \sa innerIndexPtr(), outerIndexPtr() */ inline Scalar* valuePtr() { return &m_data.value(0); } /** \returns a const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ inline const Index* innerIndexPtr() const { return &m_data.index(0); } /** \returns a non-const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ inline Index* innerIndexPtr() { return &m_data.index(0); } /** \returns a const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. 
* \sa valuePtr(), innerIndexPtr() */ inline const Index* outerIndexPtr() const { return m_outerIndex; } /** \returns a non-const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ inline Index* outerIndexPtr() { return m_outerIndex; } /** \returns a const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; } /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ inline Index* innerNonZeroPtr() { return m_innerNonZeros; } /** \internal */ inline Storage& data() { return m_data; } /** \internal */ inline const Storage& data() const { return m_data; } /** \returns the value of the matrix at position \a i, \a j * This function returns Scalar(0) if the element is an explicit \em zero */ inline Scalar coeff(Index row, Index col) const { eigen_assert(row>=0 && row=0 && col=0 && row=0 && col=start && "you probably called coeffRef on a non finalized matrix"); if(end<=start) return insert(row,col); const Index p = m_data.searchLowerIndex(start,end-1,inner); if((p=0 && row=0 && col::Constant(outerSize(), 2)); } return insertUncompressed(row,col); } public: class InnerIterator; class ReverseInnerIterator; /** Removes all non zeros but keep allocated memory */ inline void setZero() { m_data.clear(); memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); if(m_innerNonZeros) memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index)); } /** \returns the number of non zero coefficients */ inline Index nonZeros() const { if(m_innerNonZeros) return innerNonZeros().sum(); return static_cast(m_data.size()); } /** Preallocates \a reserveSize non zeros. * * Precondition: the matrix must be in compressed mode. */ inline void reserve(Index reserveSize) { eigen_assert(isCompressed() && "This function does not make sense in non compressed mode."); m_data.reserve(reserveSize); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j. * * This function turns the matrix in non-compressed mode */ template inline void reserve(const SizesType& reserveSizes); #else template inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type()) { EIGEN_UNUSED_VARIABLE(enableif); reserveInnerVectors(reserveSizes); } template inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif = #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename typename #endif SizesType::Scalar()) { EIGEN_UNUSED_VARIABLE(enableif); reserveInnerVectors(reserveSizes); } #endif // EIGEN_PARSED_BY_DOXYGEN protected: template inline void reserveInnerVectors(const SizesType& reserveSizes) { if(isCompressed()) { std::size_t totalReserveSize = 0; // turn the matrix into non-compressed mode m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); // temporarily use m_innerSizes to hold the new starting points. 
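// ---------------------------------------------------------------------------
// Usage sketch for the reservation API above (illustrative, assuming the
// Eigen 3.x public API): reserving an estimate per inner vector keeps random
// insert() calls cheap instead of triggering large element copies.
//
//   #include <Eigen/Sparse>
//
//   int main() {
//     const int n = 1000;
//     Eigen::SparseMatrix<double> A(n, n);
//     A.reserve(Eigen::VectorXi::Constant(n, 6));  // ~6 non-zeros per column
//     for (int j = 0; j < n; j += 100)
//       for (int i = 0; i < n; i += 200)
//         A.insert(i, j) = i + j;   // (i,j) must not already exist
//     A.makeCompressed();           // back to the standard compressed form
//   }
// ---------------------------------------------------------------------------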
Index* newOuterIndex = m_innerNonZeros; Index count = 0; for(Index j=0; j=0; --j) { Index innerNNZ = previousOuterIndex - m_outerIndex[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); } previousOuterIndex = m_outerIndex[j]; m_outerIndex[j] = newOuterIndex[j]; m_innerNonZeros[j] = innerNNZ; } m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1]; m_data.resize(m_outerIndex[m_outerSize]); } else { Index* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(Index))); if (!newOuterIndex) internal::throw_std_bad_alloc(); Index count = 0; for(Index j=0; j(reserveSizes[j], alreadyReserved); count += toReserve + m_innerNonZeros[j]; } newOuterIndex[m_outerSize] = count; m_data.resize(count); for(Index j=m_outerSize-1; j>=0; --j) { Index offset = newOuterIndex[j] - m_outerIndex[j]; if(offset>0) { Index innerNNZ = m_innerNonZeros[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); } } } std::swap(m_outerIndex, newOuterIndex); std::free(newOuterIndex); } } public: //--- low level purely coherent filling --- /** \internal * \returns a reference to the non zero coefficient at position \a row, \a col assuming that: * - the nonzero does not already exist * - the new coefficient is the last one according to the storage order * * Before filling a given inner vector you must call the startVec(Index) function. * * After an insertion session, you should call the finalize() function. * * \sa insert, insertBackByOuterInner, startVec */ inline Scalar& insertBack(Index row, Index col) { return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); } /** \internal * \sa insertBack, startVec */ inline Scalar& insertBackByOuterInner(Index outer, Index inner) { eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)(m_data.size()); Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) --i; ++i; while (i<=m_outerSize) { m_outerIndex[i] = size; ++i; } } } //--- template void setFromTriplets(const InputIterators& begin, const InputIterators& end); void sumupDuplicates(); //--- /** \internal * same as insert(Index,Index) except that the indices are given relative to the storage order */ Scalar& insertByOuterInner(Index j, Index i) { return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); } /** Turns the matrix into the \em compressed format. */ void makeCompressed() { if(isCompressed()) return; Index oldStart = m_outerIndex[1]; m_outerIndex[1] = m_innerNonZeros[0]; for(Index j=1; j0) { for(Index k=0; k(std::malloc(m_outerSize * sizeof(Index))); for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } } /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) { prune(default_prunning_func(reference,epsilon)); } /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
* The functor type \a KeepFunc must implement the following function: * \code * bool operator() (const Index& row, const Index& col, const Scalar& value) const; * \endcode * \sa prune(Scalar,RealScalar) */ template void prune(const KeepFunc& keep = KeepFunc()) { // TODO optimize the uncompressed mode to avoid moving and allocating the data twice // TODO also implement a unit test makeCompressed(); Index k = 0; for(Index j=0; jrows() == rows && this->cols() == cols) return; // If one dimension is null, then there is nothing to be preserved if(rows==0 || cols==0) return resize(rows,cols); Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows(); Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); Index newInnerSize = IsRowMajor ? cols : rows; // Deals with inner non zeros if (m_innerNonZeros) { // Resize m_innerNonZeros Index *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index))); if (!newInnerNonZeros) internal::throw_std_bad_alloc(); m_innerNonZeros = newInnerNonZeros; for(Index i=m_outerSize; i(std::malloc((m_outerSize+outerChange+1) * sizeof(Index))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); for(Index i = 0; i < m_outerSize; i++) m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } // Change the m_innerNonZeros in case of a decrease of inner size if (m_innerNonZeros && innerChange < 0) { for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) { Index &n = m_innerNonZeros[i]; Index start = m_outerIndex[i]; while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; } } m_innerSize = newInnerSize; // Re-allocate outer index structure if necessary if (outerChange == 0) return; Index *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index))); if (!newOuterIndex) internal::throw_std_bad_alloc(); m_outerIndex = newOuterIndex; if (outerChange > 0) { Index last = m_outerSize == 0 ? 
0 : m_outerIndex[m_outerSize]; for(Index i=m_outerSize; i(std::malloc((outerSize + 1) * sizeof(Index))); if (!m_outerIndex) internal::throw_std_bad_alloc(); m_outerSize = outerSize; } if(m_innerNonZeros) { std::free(m_innerNonZeros); m_innerNonZeros = 0; } memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); } /** \internal * Resize the nonzero vector to \a size */ void resizeNonZeros(Index size) { // TODO remove this function m_data.resize(size); } /** \returns a const expression of the diagonal coefficients */ const Diagonal diagonal() const { return *this; } /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */ inline SparseMatrix() : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); resize(0, 0); } /** Constructs a \a rows \c x \a cols empty matrix */ inline SparseMatrix(Index rows, Index cols) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); resize(rows, cols); } /** Constructs a sparse matrix from the sparse expression \a other */ template inline SparseMatrix(const SparseMatrixBase& other) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) check_template_parameters(); *this = other.derived(); } /** Constructs a sparse matrix from the sparse selfadjoint view \a other */ template inline SparseMatrix(const SparseSelfAdjointView& other) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); *this = other; } /** Copy constructor (it performs a deep copy) */ inline SparseMatrix(const SparseMatrix& other) : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); *this = other.derived(); } /** \brief Copy constructor with in-place evaluation */ template SparseMatrix(const ReturnByValue& other) : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); initAssignment(other); other.evalTo(*this); } /** Swaps the content of two sparse matrices of the same type. * This is a fast operation that simply swaps the underlying pointers and parameters. 
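* As an illustrative sketch (added here, not part of the original documentation):
* \code
* SparseMatrix<double> a(1000,1000), b(1000,1000);
* a.swap(b); // O(1): only pointers and sizes are exchanged, no coefficient is copied
* \endcode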
*/ inline void swap(SparseMatrix& other) { //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); std::swap(m_outerIndex, other.m_outerIndex); std::swap(m_innerSize, other.m_innerSize); std::swap(m_outerSize, other.m_outerSize); std::swap(m_innerNonZeros, other.m_innerNonZeros); m_data.swap(other.m_data); } /** Sets *this to the identity matrix */ inline void setIdentity() { eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); this->m_data.resize(rows()); Eigen::Map >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); Eigen::Map >(&this->m_data.value(0), rows()).setOnes(); Eigen::Map >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); } inline SparseMatrix& operator=(const SparseMatrix& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else if(this!=&other) { initAssignment(other); if(other.isCompressed()) { memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index)); m_data = other.m_data; } else { Base::operator=(other); } } return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template inline SparseMatrix& operator=(const SparseSparseProduct& product) { return Base::operator=(product); } template inline SparseMatrix& operator=(const ReturnByValue& other) { initAssignment(other); return Base::operator=(other.derived()); } template inline SparseMatrix& operator=(const EigenBase& other) { return Base::operator=(other.derived()); } #endif template EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase& other); friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) { EIGEN_DBG_SPARSE( s << "Nonzero entries:\n"; if(m.isCompressed()) for (Index i=0; i&>(m); return s; } /** Destructor */ inline ~SparseMatrix() { std::free(m_outerIndex); std::free(m_innerNonZeros); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** Overloaded for performance */ Scalar sum() const; #endif # ifdef EIGEN_SPARSEMATRIX_PLUGIN # include EIGEN_SPARSEMATRIX_PLUGIN # endif protected: template void initAssignment(const Other& other) { resize(other.rows(), other.cols()); if(m_innerNonZeros) { std::free(m_innerNonZeros); m_innerNonZeros = 0; } } /** \internal * \sa insert(Index,Index) */ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col); /** \internal * A vector object that is equal to 0 everywhere but v at the position i */ class SingletonVector { Index m_index; Index m_value; public: typedef Index value_type; SingletonVector(Index i, Index v) : m_index(i), m_value(v) {} Index operator[](Index i) const { return i==m_index ? m_value : 0; } }; /** \internal * \sa insert(Index,Index) */ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col); public: /** \internal * \sa insert(Index,Index) */ EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; eigen_assert(!isCompressed()); eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; m_data.index(p) = inner; return (m_data.value(p) = 0); } private: static void check_template_parameters() { EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } struct default_prunning_func { default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {} inline bool operator() (const Index&, const Index&, const Scalar& value) const { return !internal::isMuchSmallerThan(value, reference, epsilon); } Scalar reference; RealScalar epsilon; }; }; template class SparseMatrix::InnerIterator { public: InnerIterator(const SparseMatrix& mat, Index outer) : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]) { if(mat.isCompressed()) m_end = mat.m_outerIndex[outer+1]; else m_end = m_id + mat.m_innerNonZeros[outer]; } inline InnerIterator& operator++() { m_id++; return *this; } inline const Scalar& value() const { return m_values[m_id]; } inline Scalar& valueRef() { return const_cast(m_values[m_id]); } inline Index index() const { return m_indices[m_id]; } inline Index outer() const { return m_outer; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end); } protected: const Scalar* m_values; const Index* m_indices; const Index m_outer; Index m_id; Index m_end; }; template class SparseMatrix::ReverseInnerIterator { public: ReverseInnerIterator(const SparseMatrix& mat, Index outer) : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer]) { if(mat.isCompressed()) m_id = mat.m_outerIndex[outer+1]; else m_id = m_start + mat.m_innerNonZeros[outer]; } inline ReverseInnerIterator& operator--() { --m_id; return *this; } inline const Scalar& value() const { return m_values[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_values[m_id-1]); } inline Index index() const { return m_indices[m_id-1]; } inline Index outer() const { return m_outer; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id > m_start); } protected: const Scalar* m_values; const Index* m_indices; const Index m_outer; Index m_id; const Index m_start; }; namespace internal { template void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0) { EIGEN_UNUSED_VARIABLE(Options); enum { IsRowMajor = SparseMatrixType::IsRowMajor }; typedef typename SparseMatrixType::Scalar Scalar; typedef typename SparseMatrixType::Index Index; SparseMatrix trMat(mat.rows(),mat.cols()); if(begin!=end) { // pass 1: count the nnz per inner-vector Matrix wi(trMat.outerSize()); wi.setZero(); for(InputIterator it(begin); it!=end; ++it) { eigen_assert(it->row()>=0 && it->row()col()>=0 && it->col()col() : it->row())++; } // pass 2: insert all the elements into trMat trMat.reserve(wi); for(InputIterator it(begin); it!=end; ++it) trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); // pass 3: trMat.sumupDuplicates(); } // pass 4: transposed copy -> implicit sorting mat = trMat; } } /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end. * * A \em triplet is a tuple (i,j,value) defining a non-zero element. * The input list of triplets does not have to be sorted, and can contains duplicated elements. * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up. * This is a \em O(n) operation, with \em n the number of triplet elements. * The initial contents of \c *this is destroyed. * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, * or the resize(Index,Index) method. The sizes are not extracted from the triplet list. * * The \a InputIterators value_type must provide the following interface: * \code * Scalar value() const; // the value * Scalar row() const; // the row index i * Scalar col() const; // the column index j * \endcode * See for instance the Eigen::Triplet template class. * * Here is a typical usage example: * \code typedef Triplet T; std::vector tripletList; triplets.reserve(estimation_of_entries); for(...) { // ... tripletList.push_back(T(i,j,v_ij)); } SparseMatrixType m(rows,cols); m.setFromTriplets(tripletList.begin(), tripletList.end()); // m is ready to go! * \endcode * * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather * be explicitely stored into a std::vector for instance. 
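* As a concrete illustration (added here, not from the original documentation):
* the triplets (0,0,1), (0,0,2) and (1,2,5) yield a matrix with exactly two
* stored coefficients: m(0,0) == 3 (the duplicates are summed) and m(1,2) == 5.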
*/ template template void SparseMatrix::setFromTriplets(const InputIterators& begin, const InputIterators& end) { internal::set_from_triplets(begin, end, *this); } /** \internal */ template void SparseMatrix::sumupDuplicates() { eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task Matrix wi(innerSize()); wi.fill(-1); Index count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers for(Index j=0; j=start) { // we already meet this entry => accumulate it m_data.value(wi(i)) += m_data.value(k); } else { m_data.value(count) = m_data.value(k); m_data.index(count) = m_data.index(k); wi(i) = count; ++count; } } m_outerIndex[j] = start; } m_outerIndex[m_outerSize] = count; // turn the matrix into compressed form std::free(m_innerNonZeros); m_innerNonZeros = 0; m_data.resize(m_outerIndex[m_outerSize]); } template template EIGEN_DONT_INLINE SparseMatrix& SparseMatrix::operator=(const SparseMatrixBase& other) { EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); if (needToTranspose) { // two passes algorithm: // 1 - compute the number of coeffs per dest inner vector // 2 - do the actual copy/eval // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed typedef typename internal::nested::type OtherCopy; typedef typename internal::remove_all::type _OtherCopy; OtherCopy otherCopy(other.derived()); SparseMatrix dest(other.rows(),other.cols()); Eigen::Map > (dest.m_outerIndex,dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass for (Index j=0; j positions(dest.outerSize()); for (Index j=0; jswap(dest); return *this; } else { if(other.isRValue()) initAssignment(other.derived()); // there is no special optimization return Base::operator=(other.derived()); } } template EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col) { eigen_assert(!isCompressed()); const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; Index innerNNZ = m_innerNonZeros[outer]; if(innerNNZ>=room) { // this inner vector is full, we need to reallocate the whole buffer :( reserve(SingletonVector(outer,std::max(2,innerNNZ))); } Index startId = m_outerIndex[outer]; Index p = startId + m_innerNonZeros[outer]; while ( (p > startId) && (m_data.index(p-1) > inner) ) { m_data.index(p) = m_data.index(p-1); m_data.value(p) = m_data.value(p-1); --p; } eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end"); m_innerNonZeros[outer]++; m_data.index(p) = inner; return (m_data.value(p) = 0); } template EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col) { eigen_assert(isCompressed()); const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; Index previousOuter = outer; if (m_outerIndex[outer+1]==0) { // we start a new inner vector while (previousOuter>=0 && m_outerIndex[previousOuter]==0) { m_outerIndex[previousOuter] = static_cast(m_data.size()); --previousOuter; } m_outerIndex[outer+1] = m_outerIndex[outer]; } // here we have to handle the tricky case where the outerIndex array // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g., // the 2nd inner vector... bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) && (size_t(m_outerIndex[outer+1]) == m_data.size()); size_t startId = m_outerIndex[outer]; // FIXME let's make sure sizeof(long int) == sizeof(size_t) size_t p = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; double reallocRatio = 1; if (m_data.allocatedSize()<=m_data.size()) { // if there is no preallocated memory, let's reserve a minimum of 32 elements if (m_data.size()==0) { m_data.reserve(32); } else { // we need to reallocate the data, to reduce multiple reallocations // we use a smart resize algorithm based on the current filling ratio // in addition, we use double to avoid integers overflows double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1); reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size()); // furthermore we bound the realloc ratio to: // 1) reduce multiple minor realloc when the matrix is almost filled // 2) avoid to allocate too much memory when the matrix is almost empty reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.); } } m_data.resize(m_data.size()+1,reallocRatio); if (!isLastVec) { if (previousOuter==-1) { // oops wrong guess. // let's correct the outer offsets for (Index k=0; k<=(outer+1); ++k) m_outerIndex[k] = 0; Index k=outer+1; while(m_outerIndex[k]==0) m_outerIndex[k++] = 1; while (k<=m_outerSize && m_outerIndex[k]!=0) m_outerIndex[k++]++; p = 0; --k; k = m_outerIndex[k]-1; while (k>0) { m_data.index(k) = m_data.index(k-1); m_data.value(k) = m_data.value(k-1); k--; } } else { // we are not inserting into the last inner vec // update outer indices: Index j = outer+2; while (j<=m_outerSize && m_outerIndex[j]!=0) m_outerIndex[j++]++; --j; // shift data of last vecs: Index k = m_outerIndex[j]-1; while (k>=Index(p)) { m_data.index(k) = m_data.index(k-1); m_data.value(k) = m_data.value(k-1); k--; } } } while ( (p > startId) && (m_data.index(p-1) > inner) ) { m_data.index(p) = m_data.index(p-1); m_data.value(p) = m_data.value(p-1); --p; } m_data.index(p) = inner; return (m_data.value(p) = 0); } } // end namespace Eigen #endif // EIGEN_SPARSEMATRIX_H antimony/lib/fab/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h0000644000175000017500000002104313341130426027076 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H #define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H namespace Eigen { namespace internal { template static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef typename remove_all::type::Scalar Scalar; typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. 
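// Added commentary (not original code): the implementation below follows the
// classic Gustavson scheme. For each outer vector j of the result it scatters
// the partial products lhs(:,k) * rhs(k,j) into a dense value buffer guarded
// by a boolean mask, then gathers the touched indices back into the sparse
// result, so each column costs O(number of flops) rather than O(rows).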
Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); std::vector mask(rows,false); Matrix values(rows); Matrix indices(rows); // estimate the number of non zero entries // given a rhs column containing Y non zeros, we assume that the respective Y columns // of the lhs differs in average of one non zeros, thus the number of non zeros for // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); res.setZero(); res.reserve(Index(estimated_nnz_prod)); // we compute each column of the result, one after the other for (Index j=0; j use a quick sort // otherwise => loop through the entire vector // In order to avoid to perform an expensive log2 when the // result is clearly very sparse we use a linear bound up to 200. //if((nnz<200 && nnz1) std::sort(indices.data(),indices.data()+nnz); for(Index k=0; k::Flags&RowMajorBit) ? RowMajor : ColMajor, int RhsStorageOrder = (traits::Flags&RowMajorBit) ? RowMajor : ColMajor, int ResStorageOrder = (traits::Flags&RowMajorBit) ? RowMajor : ColMajor> struct conservative_sparse_sparse_product_selector; template struct conservative_sparse_sparse_product_selector { typedef typename remove_all::type LhsCleaned; typedef typename LhsCleaned::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); // sort the non zeros: RowMajorMatrix resRow(resCol); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix rhsRow = rhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); res = resRow; } }; template struct conservative_sparse_sparse_product_selector { typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); res = resCol; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); res = resCol; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef 
SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); res = resCol; } }; template struct conservative_sparse_sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; typedef SparseMatrix ColMajorMatrix; RowMajorMatrix resRow(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); // sort the non zeros: ColMajorMatrix resCol(resRow); res = resCol; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseMatrixBase.h0000644000175000017500000004363613341130426023762 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEMATRIXBASE_H #define EIGEN_SPARSEMATRIXBASE_H namespace Eigen { /** \ingroup SparseCore_Module * * \class SparseMatrixBase * * \brief Base class of any sparse matrices or sparse expressions * * \tparam Derived * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN. */ template class SparseMatrixBase : public EigenBase { public: typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType; typedef SparseMatrixBase StorageBaseType; typedef EigenBase Base; template Derived& operator=(const EigenBase &other) { other.derived().evalTo(derived()); return derived(); } enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, MaxSizeAtCompileTime = (internal::size_at_compile_time::ret), IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. 
Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. */ IsRowMajor = Flags&RowMajorBit ? 1 : 0, InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), #ifndef EIGEN_PARSED_BY_DOXYGEN _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC #endif }; /** \internal the return type of MatrixBase::adjoint() */ typedef typename internal::conditional::IsComplex, CwiseUnaryOp, Eigen::Transpose >, Transpose >::type AdjointReturnType; typedef SparseMatrix PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If * \a Scalar is \a std::complex then RealScalar is \a T. * * \sa class NumTraits */ typedef typename NumTraits::Real RealScalar; /** \internal the return type of coeff() */ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType; /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,Matrix > ConstantReturnType; /** type of the equivalent square matrix */ typedef Matrix SquareMatrixType; inline const Derived& derived() const { return *static_cast(this); } inline Derived& derived() { return *static_cast(this); } inline Derived& const_cast_derived() const { return *static_cast(const_cast(this)); } #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # include "../plugins/BlockMethods.h" # ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN # include EIGEN_SPARSEMATRIXBASE_PLUGIN # endif # undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_CURRENT_STORAGE_BASE_CLASS /** \returns the number of rows. \sa cols() */ inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows() */ inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(). */ inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return derived().nonZeros(); } /** \returns true if either the number of rows or the number of columns is equal to 1. * In other words, this function returns * \code rows()==1 || cols()==1 \endcode * \sa rows(), cols(), IsVectorAtCompileTime. */ inline bool isVector() const { return rows()==1 || cols()==1; } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ Index outerSize() const { return (int(Flags)&RowMajorBit) ? 
this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ } template Derived& operator=(const ReturnByValue& other) { other.evalTo(derived()); return derived(); } template inline Derived& operator=(const SparseMatrixBase& other) { return assign(other.derived()); } inline Derived& operator=(const Derived& other) { // if (other.isRValue()) // derived().swap(other.const_cast_derived()); // else return assign(other.derived()); } protected: template inline Derived& assign(const OtherDerived& other) { const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols(); if ((!transpose) && other.isRValue()) { // eval without temporary derived().resize(other.rows(), other.cols()); derived().setZero(); derived().reserve((std::max)(this->rows(),this->cols())*2); for (Index j=0; j inline void assignGeneric(const OtherDerived& other) { //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); eigen_assert(( ((internal::traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) && "the transpose operation is supposed to be handled in SparseMatrix::operator="); enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) }; const Index outerSize = other.outerSize(); //typedef typename internal::conditional, Derived>::type TempType; // thanks to shallow copies, we always eval to a tempary Derived temp(other.rows(), other.cols()); temp.reserve((std::max)(this->rows(),this->cols())*2); for (Index j=0; j inline Derived& operator=(const SparseSparseProduct& product); friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m) { typedef typename Derived::Nested Nested; typedef typename internal::remove_all::type NestedCleaned; if (Flags&RowMajorBit) { const Nested nm(m.derived()); for (Index row=0; row trans = m; s << static_cast >&>(trans); } } return s; } template Derived& operator+=(const SparseMatrixBase& other); template Derived& operator-=(const SparseMatrixBase& other); Derived& operator*=(const Scalar& other); Derived& operator/=(const Scalar& other); #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \ CwiseBinaryOp< \ internal::scalar_product_op< \ typename internal::scalar_product_traits< \ typename internal::traits::Scalar, \ typename internal::traits::Scalar \ >::ReturnType \ >, \ const Derived, \ const OtherDerived \ > template EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE cwiseProduct(const MatrixBase &other) const; // sparse * sparse template const typename SparseSparseProductReturnType::Type operator*(const SparseMatrixBase &other) const; // sparse * diagonal template const SparseDiagonalProduct operator*(const DiagonalBase &other) const; // diagonal * sparse template friend const SparseDiagonalProduct operator*(const DiagonalBase &lhs, const SparseMatrixBase& rhs) { return SparseDiagonalProduct(lhs.derived(), rhs.derived()); } /** dense * sparse (return a dense object unless it is an outer product) */ template 
friend const typename DenseSparseProductReturnType::Type operator*(const MatrixBase& lhs, const Derived& rhs) { return typename DenseSparseProductReturnType::Type(lhs.derived(),rhs); } /** sparse * dense (returns a dense object unless it is an outer product) */ template const typename SparseDenseProductReturnType::Type operator*(const MatrixBase &other) const { return typename SparseDenseProductReturnType::Type(derived(), other.derived()); } /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct(derived(), perm); } template Derived& operator*=(const SparseMatrixBase& other); #ifdef EIGEN2_SUPPORT // deprecated template typename internal::plain_matrix_type_column_major::type solveTriangular(const MatrixBase& other) const; // deprecated template void solveTriangularInPlace(MatrixBase& other) const; #endif // EIGEN2_SUPPORT template inline const SparseTriangularView triangularView() const; template inline const SparseSelfAdjointView selfadjointView() const; template inline SparseSelfAdjointView selfadjointView(); template Scalar dot(const MatrixBase& other) const; template Scalar dot(const SparseMatrixBase& other) const; RealScalar squaredNorm() const; RealScalar norm() const; RealScalar blueNorm() const; Transpose transpose() { return derived(); } const Transpose transpose() const { return derived(); } const AdjointReturnType adjoint() const { return transpose(); } // inner-vector typedef Block InnerVectorReturnType; typedef Block ConstInnerVectorReturnType; InnerVectorReturnType innerVector(Index outer); const ConstInnerVectorReturnType innerVector(Index outer) const; // set of inner-vectors Block innerVectors(Index outerStart, Index outerSize); const Block innerVectors(Index outerStart, Index outerSize) const; /** \internal use operator= */ template void evalTo(MatrixBase& dst) const { dst.setZero(); for (Index j=0; j toDense() const { return derived(); } template bool isApprox(const SparseMatrixBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const { return toDense().isApprox(other.toDense(),prec); } template bool isApprox(const MatrixBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const { return toDense().isApprox(other,prec); } /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. */ inline const typename internal::eval::type eval() const { return typename internal::eval::type(derived()); } Scalar sum() const; protected: bool m_isRValue; }; } // end namespace Eigen #endif // EIGEN_SPARSEMATRIXBASE_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseTranspose.h0000644000175000017500000000467513341130426023701 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
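// Added note (not part of the original sources): for sparse expressions the
// transpose is a lazy view. The iterators defined below simply exchange the
// meaning of row() and col(); no coefficient is moved until the expression is
// assigned, e.g.
//
//   SparseMatrix<double> A(10, 10);
//   SparseMatrix<double, RowMajor> B = A.transpose(); // evaluated on assignment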
#ifndef EIGEN_SPARSETRANSPOSE_H #define EIGEN_SPARSETRANSPOSE_H namespace Eigen { template class TransposeImpl : public SparseMatrixBase > { typedef typename internal::remove_all::type _MatrixTypeNested; public: EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose ) class InnerIterator; class ReverseInnerIterator; inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } }; // NOTE: VC10 and VC11 trigger an ICE if don't put typename TransposeImpl:: in front of Index, // a typedef typename TransposeImpl::Index Index; // does not fix the issue. // An alternative is to define the nested class in the parent class itself. template class TransposeImpl::InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename _MatrixTypeNested::InnerIterator Base; typedef typename TransposeImpl::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl::Index outer) : Base(trans.derived().nestedExpression(), outer) {} typename TransposeImpl::Index row() const { return Base::col(); } typename TransposeImpl::Index col() const { return Base::row(); } }; template class TransposeImpl::ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator { typedef typename _MatrixTypeNested::ReverseInnerIterator Base; typedef typename TransposeImpl::Index Index; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl::Index outer) : Base(xpr.derived().nestedExpression(), outer) {} typename TransposeImpl::Index row() const { return Base::col(); } typename TransposeImpl::Index col() const { return Base::row(); } }; } // end namespace Eigen #endif // EIGEN_SPARSETRANSPOSE_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseProduct.h0000644000175000017500000001534513341130426023337 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
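// Added note (not part of the original sources): this header provides
// operator* for sparse * sparse expressions. A minimal usage sketch:
//
//   SparseMatrix<double> A(n, n), B(n, n), C(n, n);
//   C = A * B;            // conservative product: keeps symbolic nonzeros
//   C = (A * B).pruned(); // additionally suppresses numerical zeros
//
// where n is a placeholder dimension; see the pruned() documentation below.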
#ifndef EIGEN_SPARSEPRODUCT_H #define EIGEN_SPARSEPRODUCT_H namespace Eigen { template struct SparseSparseProductReturnType { typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::Index Index; enum { LhsRowMajor = internal::traits::Flags & RowMajorBit, RhsRowMajor = internal::traits::Flags & RowMajorBit, TransposeRhs = (!LhsRowMajor) && RhsRowMajor, TransposeLhs = LhsRowMajor && (!RhsRowMajor) }; typedef typename internal::conditional, typename internal::nested::type>::type LhsNested; typedef typename internal::conditional, typename internal::nested::type>::type RhsNested; typedef SparseSparseProduct Type; }; namespace internal { template struct traits > { typedef MatrixXpr XprKind; // clean the nested types: typedef typename remove_all::type _LhsNested; typedef typename remove_all::type _RhsNested; typedef typename _LhsNested::Scalar Scalar; typedef typename promote_index_type::Index, typename traits<_RhsNested>::Index>::type Index; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, RowsAtCompileTime = _LhsNested::RowsAtCompileTime, ColsAtCompileTime = _RhsNested::ColsAtCompileTime, MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit), Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) | EvalBeforeAssigningBit | EvalBeforeNestingBit, CoeffReadCost = Dynamic }; typedef Sparse StorageKind; }; } // end namespace internal template class SparseSparseProduct : internal::no_assignment_operator, public SparseMatrixBase > { public: typedef SparseMatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct) private: typedef typename internal::traits::_LhsNested _LhsNested; typedef typename internal::traits::_RhsNested _RhsNested; public: template EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true) { init(); } template EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance) : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false) { init(); } SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits::dummy_precision()) const { using std::abs; return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon); } template void evalTo(Dest& result) const { if(m_conservative) internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result); else internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance); } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: void init() { eigen_assert(m_lhs.cols() == m_rhs.rows()); enum { ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic || _RhsNested::RowsAtCompileTime==Dynamic || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime), AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, SameSizes = 
EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested) }; // note to the lost user: // * for a dot product use: v1.dot(v2) // * for a coeff-wise product use: v1.cwise()*v2 EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) } LhsNested m_lhs; RhsNested m_rhs; RealScalar m_tolerance; bool m_conservative; }; // sparse = sparse * sparse template template inline Derived& SparseMatrixBase::operator=(const SparseSparseProduct& product) { product.evalTo(derived()); return derived(); } /** \returns an expression of the product of two sparse matrices. * By default a conservative product preserving the symbolic non zeros is performed. * The automatic pruning of the small values can be achieved by calling the pruned() function * in which case a totally different product algorithm is employed: * \code * C = (A*B).pruned(); // supress numerical zeros (exact) * C = (A*B).pruned(ref); * C = (A*B).pruned(ref,epsilon); * \endcode * where \c ref is a meaningful non zero reference value. * */ template template inline const typename SparseSparseProductReturnType::Type SparseMatrixBase::operator*(const SparseMatrixBase &other) const { return typename SparseSparseProductReturnType::Type(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSEPRODUCT_H antimony/lib/fab/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h0000644000175000017500000001415213341130426024305 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MAPPED_SPARSEMATRIX_H #define EIGEN_MAPPED_SPARSEMATRIX_H namespace Eigen { /** \class MappedSparseMatrix * * \brief Sparse matrix * * \param _Scalar the scalar type, i.e. the type of the coefficients * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * */ namespace internal { template struct traits > : traits > {}; } template class MappedSparseMatrix : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix) enum { IsRowMajor = Base::IsRowMajor }; protected: Index m_outerSize; Index m_innerSize; Index m_nnz; Index* m_outerIndex; Index* m_innerIndices; Scalar* m_values; public: inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } inline Index cols() const { return IsRowMajor ? 
m_innerSize : m_outerSize; } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return m_outerSize; } bool isCompressed() const { return true; } //---------------------------------------- // direct access interface inline const Scalar* valuePtr() const { return m_values; } inline Scalar* valuePtr() { return m_values; } inline const Index* innerIndexPtr() const { return m_innerIndices; } inline Index* innerIndexPtr() { return m_innerIndices; } inline const Index* outerIndexPtr() const { return m_outerIndex; } inline Index* outerIndexPtr() { return m_outerIndex; } //---------------------------------------- inline Scalar coeff(Index row, Index col) const { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index start = m_outerIndex[outer]; Index end = m_outerIndex[outer+1]; if (start==end) return Scalar(0); else if (end>0 && inner==m_innerIndices[end-1]) return m_values[end-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); const Index id = r-&m_innerIndices[0]; return ((*r==inner) && (id=start && "you probably called coeffRef on a non finalized matrix"); eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); const Index id = r-&m_innerIndices[0]; eigen_assert((*r==inner) && (id class MappedSparseMatrix::InnerIterator { public: InnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(mat.outerIndexPtr()[outer]), m_start(m_id), m_end(mat.outerIndexPtr()[outer+1]) {} inline InnerIterator& operator++() { m_id++; return *this; } inline Scalar value() const { return m_matrix.valuePtr()[m_id]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id]); } inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } protected: const MappedSparseMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; template class MappedSparseMatrix::ReverseInnerIterator { public: ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(mat.outerIndexPtr()[outer+1]), m_start(mat.outerIndexPtr()[outer]), m_end(m_id) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id-1]); } inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); } protected: const MappedSparseMatrix& m_matrix; const Index m_outer; Index m_id; const Index m_start; const Index m_end; }; } // end namespace Eigen #endif // EIGEN_MAPPED_SPARSEMATRIX_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseTriangularView.h0000644000175000017500000001346113341130426024657 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_TRIANGULARVIEW_H #define EIGEN_SPARSE_TRIANGULARVIEW_H namespace Eigen { namespace internal { template struct traits > : public traits {}; } // namespace internal template class SparseTriangularView : public SparseMatrixBase > { enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit)) || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)), SkipLast = !SkipFirst, SkipDiag = (Mode&ZeroDiag) ? 1 : 0, HasUnitDiag = (Mode&UnitDiag) ? 1 : 0 }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView) class InnerIterator; class ReverseInnerIterator; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_reference::type MatrixTypeNestedNonRef; typedef typename internal::remove_all::type MatrixTypeNestedCleaned; inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {} /** \internal */ inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } template typename internal::plain_matrix_type_column_major::type solve(const MatrixBase& other) const; template void solveInPlace(MatrixBase& other) const; template void solveInPlace(SparseMatrixBase& other) const; protected: MatrixTypeNested m_matrix; }; template class SparseTriangularView::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator { typedef typename MatrixTypeNestedCleaned::InnerIterator Base; typedef typename SparseTriangularView::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer) : Base(view.nestedExpression(), outer), m_returnOne(false) { if(SkipFirst) { while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()=Base::outer())) { if((!SkipFirst) && Base::operator bool()) Base::operator++(); m_returnOne = true; } } EIGEN_STRONG_INLINE InnerIterator& operator++() { if(HasUnitDiag && m_returnOne) m_returnOne = false; else { Base::operator++(); if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer())) { if((!SkipFirst) && Base::operator bool()) Base::operator++(); m_returnOne = true; } } return *this; } inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } inline Index col() const { return (MatrixType::Flags&RowMajorBit ? 
this->index() : Base::outer()); } inline Index index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); } inline Scalar value() const { if(HasUnitDiag && m_returnOne) return Scalar(1); else return Base::value(); } EIGEN_STRONG_INLINE operator bool() const { if(HasUnitDiag && m_returnOne) return true; if(SkipFirst) return Base::operator bool(); else { if (SkipDiag) return (Base::operator bool() && this->index() < this->outer()); else return (Base::operator bool() && this->index() <= this->outer()); } } protected: bool m_returnOne; }; template class SparseTriangularView::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator { typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; typedef typename SparseTriangularView::Index Index; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer) : Base(view.nestedExpression(), outer) { eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal"); if(SkipLast) { while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer)) --(*this); } } EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } inline Index row() const { return Base::row(); } inline Index col() const { return Base::col(); } EIGEN_STRONG_INLINE operator bool() const { if (SkipLast) return Base::operator bool() ; else { if(SkipDiag) return (Base::operator bool() && this->index() > this->outer()); else return (Base::operator bool() && this->index() >= this->outer()); } } }; template template inline const SparseTriangularView SparseMatrixBase::triangularView() const { return derived(); } } // end namespace Eigen #endif // EIGEN_SPARSE_TRIANGULARVIEW_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h0000644000175000017500000002740213341130426024432 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
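// Added note (not part of the original sources; the overload resolution
// described here is an assumption about this Eigen version): this header
// implements the inner iterators behind coefficient-wise binary expressions
// with at least one sparse operand, e.g.
//
//   SparseMatrix<double> A(n, n), B(n, n);
//   SparseMatrix<double> S = A + B;             // visits the union of patterns
//   SparseMatrix<double> P = A.cwiseProduct(B); // visits only the intersection
//
// Both sides of a sparse-sparse expression must share the same storage order;
// this is enforced by a static assertion below.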
#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H #define EIGEN_SPARSE_CWISE_BINARY_OP_H namespace Eigen { // Here we have to handle 3 cases: // 1 - sparse op dense // 2 - dense op sparse // 3 - sparse op sparse // We also need to implement a 4th iterator for: // 4 - dense op dense // Finally, we also need to distinguish between the product and other operations : // configuration returned mode // 1 - sparse op dense product sparse // generic dense // 2 - dense op sparse product sparse // generic dense // 3 - sparse op sparse product sparse // generic sparse // 4 - dense op dense product dense // generic dense namespace internal { template<> struct promote_storage_type { typedef Sparse ret; }; template<> struct promote_storage_type { typedef Sparse ret; }; template::StorageKind, typename _RhsStorageMode = typename traits::StorageKind> class sparse_cwise_binary_op_inner_iterator_selector; } // end namespace internal template class CwiseBinaryOpImpl : public SparseMatrixBase > { public: class InnerIterator; class ReverseInnerIterator; typedef CwiseBinaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) CwiseBinaryOpImpl() { typedef typename internal::traits::StorageKind LhsStorageKind; typedef typename internal::traits::StorageKind RhsStorageKind; EIGEN_STATIC_ASSERT(( (!internal::is_same::value) || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))), THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH); } }; template class CwiseBinaryOpImpl::InnerIterator : public internal::sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> { public: typedef typename Lhs::Index Index; typedef internal::sparse_cwise_binary_op_inner_iterator_selector< BinaryOp,Lhs,Rhs, InnerIterator> Base; // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11 EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer) : Base(binOp.derived(),outer) {} }; /*************************************************************************** * Implementation of inner-iterators ***************************************************************************/ // template struct internal::func_is_conjunction { enum { ret = false }; }; // template struct internal::func_is_conjunction > { enum { ret = true }; }; // TODO generalize the internal::scalar_product_op specialization to all conjunctions if any ! 
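// Added commentary (not original code): the generic sparse-sparse selector
// below advances both inner iterators in lock step over the union of the two
// nonzero patterns, substituting Scalar(0) for a coefficient missing on one
// side. The scalar_product_op specialization only visits the intersection,
// since x * 0 == 0, which is why the product gets dedicated iterators.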
namespace internal { // sparse - sparse (generic) template class sparse_cwise_binary_op_inner_iterator_selector { typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename traits::Scalar Scalar; typedef typename traits::_LhsNested _LhsNested; typedef typename traits::_RhsNested _RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { this->operator++(); } EIGEN_STRONG_INLINE Derived& operator++() { if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index())) { m_id = m_lhsIter.index(); m_value = m_functor(m_lhsIter.value(), m_rhsIter.value()); ++m_lhsIter; ++m_rhsIter; } else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index()))) { m_id = m_lhsIter.index(); m_value = m_functor(m_lhsIter.value(), Scalar(0)); ++m_lhsIter; } else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index()))) { m_id = m_rhsIter.index(); m_value = m_functor(Scalar(0), m_rhsIter.value()); ++m_rhsIter; } else { m_value = 0; // this is to avoid a compilation warning m_id = -1; } return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_value; } EIGEN_STRONG_INLINE Index index() const { return m_id; } EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } protected: LhsIterator m_lhsIter; RhsIterator m_rhsIter; const BinaryOp& m_functor; Scalar m_value; Index m_id; }; // sparse - sparse (product) template class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Sparse> { typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename traits::_LhsNested _LhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) { if (m_lhsIter.index() < m_rhsIter.index()) ++m_lhsIter; else ++m_rhsIter; } } EIGEN_STRONG_INLINE Derived& operator++() { ++m_lhsIter; ++m_rhsIter; while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) { if (m_lhsIter.index() < m_rhsIter.index()) ++m_lhsIter; else ++m_rhsIter; } return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } protected: LhsIterator m_lhsIter; RhsIterator m_rhsIter; const BinaryFunc& m_functor; }; // sparse - dense (product) template class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Dense> { typedef scalar_product_op 
BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename traits::_LhsNested _LhsNested; typedef typename traits::RhsNested RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename Lhs::Index Index; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} EIGEN_STRONG_INLINE Derived& operator++() { ++m_lhsIter; return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } protected: RhsNested m_rhs; LhsIterator m_lhsIter; const BinaryFunc m_functor; const Index m_outer; }; // sparse - dense (product) template class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Dense, Sparse> { typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} EIGEN_STRONG_INLINE Derived& operator++() { ++m_rhsIter; return *static_cast(this); } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } protected: const CwiseBinaryXpr& m_xpr; RhsIterator m_rhsIter; const BinaryFunc& m_functor; const Index m_outer; }; } // end namespace internal /*************************************************************************** * Implementation of SparseMatrixBase and SparseCwise functions/operators ***************************************************************************/ template template EIGEN_STRONG_INLINE Derived & SparseMatrixBase::operator-=(const SparseMatrixBase &other) { return derived() = derived() - other.derived(); } template template EIGEN_STRONG_INLINE Derived & SparseMatrixBase::operator+=(const SparseMatrixBase& other) { return derived() = derived() + other.derived(); } template template EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE SparseMatrixBase::cwiseProduct(const MatrixBase &other) const { return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_CWISE_BINARY_OP_H antimony/lib/fab/vendor/Eigen/src/SparseCore/CompressedStorage.h0000644000175000017500000001506313341130426024167 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for 
linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPRESSED_STORAGE_H #define EIGEN_COMPRESSED_STORAGE_H namespace Eigen { namespace internal { /** \internal * Stores a sparse set of values as a list of values and a list of indices. * */ template class CompressedStorage { public: typedef _Scalar Scalar; typedef _Index Index; protected: typedef typename NumTraits::Real RealScalar; public: CompressedStorage() : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) {} CompressedStorage(size_t size) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { resize(size); } CompressedStorage(const CompressedStorage& other) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { *this = other; } CompressedStorage& operator=(const CompressedStorage& other) { resize(other.size()); internal::smart_copy(other.m_values, other.m_values + m_size, m_values); internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices); return *this; } void swap(CompressedStorage& other) { std::swap(m_values, other.m_values); std::swap(m_indices, other.m_indices); std::swap(m_size, other.m_size); std::swap(m_allocatedSize, other.m_allocatedSize); } ~CompressedStorage() { delete[] m_values; delete[] m_indices; } void reserve(size_t size) { size_t newAllocatedSize = m_size + size; if (newAllocatedSize > m_allocatedSize) reallocate(newAllocatedSize); } void squeeze() { if (m_allocatedSize>m_size) reallocate(m_size); } void resize(size_t size, double reserveSizeFactor = 0) { if (m_allocatedSize(m_size); resize(m_size+1, 1); m_values[id] = v; m_indices[id] = i; } inline size_t size() const { return m_size; } inline size_t allocatedSize() const { return m_allocatedSize; } inline void clear() { m_size = 0; } inline Scalar& value(size_t i) { return m_values[i]; } inline const Scalar& value(size_t i) const { return m_values[i]; } inline Index& index(size_t i) { return m_indices[i]; } inline const Index& index(size_t i) const { return m_indices[i]; } static CompressedStorage Map(Index* indices, Scalar* values, size_t size) { CompressedStorage res; res.m_indices = indices; res.m_values = values; res.m_allocatedSize = res.m_size = size; return res; } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ inline Index searchLowerIndex(Index key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ inline Index searchLowerIndex(size_t start, size_t end, Index key) const { while(end>start) { size_t mid = (end+start)>>1; if (m_indices[mid](start); } /** \returns the stored value at index \a key * If the value does not exist, then the value \a defaultValue is returned without any insertion. 
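 *
 * A minimal usage sketch (an editor's illustration, not part of the original
 * documentation; it assumes Scalar=double and Index=int):
 * \code
 * internal::CompressedStorage<double,int> s;
 * s.append(1.5, 2);          // store value 1.5 at index 2
 * double a = s.at(2);        // a == 1.5
 * double b = s.at(7, -1.0);  // index 7 is absent: the default, -1.0, is returned
 * \endcode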
*/ inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const { if (m_size==0) return defaultValue; else if (key==m_indices[m_size-1]) return m_values[m_size-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const size_t id = searchLowerIndex(0,m_size-1,key); return ((id=end) return Scalar(0); else if (end>start && key==m_indices[end-1]) return m_values[end-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const size_t id = searchLowerIndex(start,end-1,key); return ((id=m_size || m_indices[id]!=key) { resize(m_size+1,1); for (size_t j=m_size-1; j>id; --j) { m_indices[j] = m_indices[j-1]; m_values[j] = m_values[j-1]; } m_indices[id] = key; m_values[id] = defaultValue; } return m_values[id]; } void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits::dummy_precision()) { size_t k = 0; size_t n = size(); for (size_t i=0; i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEREDUX_H #define EIGEN_SPARSEREDUX_H namespace Eigen { template typename internal::traits::Scalar SparseMatrixBase::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); Scalar res(0); for (Index j=0; j typename internal::traits >::Scalar SparseMatrix<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } template typename internal::traits >::Scalar SparseVector<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } } // end namespace Eigen #endif // EIGEN_SPARSEREDUX_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h0000644000175000017500000001702713341130426024775 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H #define EIGEN_SPARSE_DIAGONAL_PRODUCT_H namespace Eigen { // The product of a diagonal matrix with a sparse matrix can be easily // implemented using expression template. // We have two consider very different cases: // 1 - diag * row-major sparse // => each inner vector <=> scalar * sparse vector product // => so we can reuse CwiseUnaryOp::InnerIterator // 2 - diag * col-major sparse // => each inner vector <=> densevector * sparse vector cwise product // => again, we can reuse specialization of CwiseBinaryOp::InnerIterator // for that particular case // The two other cases are symmetric. 
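// [Editor's illustrative note; not part of the upstream Eigen sources.]
// Concretely, writing D = diag(d) for the diagonal factor, the two cases are:
//   1 - D * row-major S : inner vector j of the product is d(j) * S.row(j),
//       i.e. a plain scalar * sparse-vector expression;
//   2 - D * col-major S : inner vector j of the product is the coefficient-wise
//       product of the dense vector d with the sparse vector S.col(j).
// A hypothetical user-level example (assuming <Eigen/Sparse> is included):
//
//   Eigen::SparseMatrix<double> S(3,3);                  // col-major by default
//   S.insert(0,0) = 2.0; S.insert(2,1) = 4.0;
//   Eigen::Vector3d d(1.0, 2.0, 3.0);
//   Eigen::SparseMatrix<double> P = d.asDiagonal() * S;  // case 2: diag * col-major
//   // P.coeff(2,1) == 3.0 * 4.0, since row i of S is scaled by d(i)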
namespace internal { template struct traits > { typedef typename remove_all::type _Lhs; typedef typename remove_all::type _Rhs; typedef typename _Lhs::Scalar Scalar; typedef typename promote_index_type::Index, typename traits::Index>::type Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Lhs::RowsAtCompileTime, ColsAtCompileTime = _Rhs::ColsAtCompileTime, MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime, MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime, SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags), Flags = (SparseFlags&RowMajorBit), CoeffReadCost = Dynamic }; }; enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor}; template class sparse_diagonal_product_inner_iterator_selector; } // end namespace internal template class SparseDiagonalProduct : public SparseMatrixBase >, internal::no_assignment_operator { typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename internal::remove_all::type _LhsNested; typedef typename internal::remove_all::type _RhsNested; enum { LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor, RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct) typedef internal::sparse_diagonal_product_inner_iterator_selector <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator; // We do not want ReverseInnerIterator for diagonal-sparse products, // but this dummy declaration is needed to make diag * sparse * diag compile. 
class ReverseInnerIterator; EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product"); } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; namespace internal { template class sparse_diagonal_product_inner_iterator_selector : public CwiseUnaryOp,const Rhs>::InnerIterator { typedef typename CwiseUnaryOp,const Rhs>::InnerIterator Base; typedef typename Lhs::Index Index; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer) {} }; template class sparse_diagonal_product_inner_iterator_selector : public CwiseBinaryOp< scalar_product_op, const typename Rhs::ConstInnerVectorReturnType, const typename Lhs::DiagonalVectorType>::InnerIterator { typedef typename CwiseBinaryOp< scalar_product_op, const typename Rhs::ConstInnerVectorReturnType, const typename Lhs::DiagonalVectorType>::InnerIterator Base; typedef typename Lhs::Index Index; Index m_outer; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer) {} inline Index outer() const { return m_outer; } inline Index col() const { return m_outer; } }; template class sparse_diagonal_product_inner_iterator_selector : public CwiseUnaryOp,const Lhs>::InnerIterator { typedef typename CwiseUnaryOp,const Lhs>::InnerIterator Base; typedef typename Lhs::Index Index; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer) {} }; template class sparse_diagonal_product_inner_iterator_selector : public CwiseBinaryOp< scalar_product_op, const typename Lhs::ConstInnerVectorReturnType, const Transpose >::InnerIterator { typedef typename CwiseBinaryOp< scalar_product_op, const typename Lhs::ConstInnerVectorReturnType, const Transpose >::InnerIterator Base; typedef typename Lhs::Index Index; Index m_outer; public: inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer) {} inline Index outer() const { return m_outer; } inline Index row() const { return m_outer; } }; } // end namespace internal // SparseMatrixBase functions template template const SparseDiagonalProduct SparseMatrixBase::operator*(const DiagonalBase &other) const { return SparseDiagonalProduct(this->derived(), other.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparsePermutation.h0000644000175000017500000001426713341130426024230 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SPARSE_PERMUTATION_H #define EIGEN_SPARSE_PERMUTATION_H // This file implements sparse * permutation products namespace Eigen { namespace internal { template struct traits > { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; typedef typename MatrixTypeNestedCleaned::Index Index; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight }; typedef typename internal::conditional, SparseMatrix >::type ReturnType; }; template struct permut_sparsematrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; typedef typename MatrixTypeNestedCleaned::Index Index; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight }; permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix) : m_permutation(perm), m_matrix(matrix) {} inline int rows() const { return m_matrix.rows(); } inline int cols() const { return m_matrix.cols(); } template inline void evalTo(Dest& dst) const { if(MoveOuter) { SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); Matrix sizes(m_matrix.outerSize()); for(Index j=0; j tmp(m_matrix.rows(), m_matrix.cols()); Matrix sizes(tmp.outerSize()); sizes.setZero(); PermutationMatrix perm; if((Side==OnTheLeft) ^ Transposed) perm = m_permutation; else perm = m_permutation.transpose(); for(Index j=0; j inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false> operator*(const SparseMatrixBase& matrix, const PermutationBase& perm) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false>(perm, matrix.derived()); } /** \returns the matrix with the permutation applied to the rows */ template inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false> operator*( const PermutationBase& perm, const SparseMatrixBase& matrix) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false>(perm, matrix.derived()); } /** \returns the matrix with the inverse permutation applied to the columns. */ template inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true> operator*(const SparseMatrixBase& matrix, const Transpose >& tperm) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived()); } /** \returns the matrix with the inverse permutation applied to the rows. */ template inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true> operator*(const Transpose >& tperm, const SparseMatrixBase& matrix) { return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseUtil.h0000644000175000017500000001502613341130426022630 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEUTIL_H #define EIGEN_SPARSEUTIL_H namespace Eigen { #ifdef NDEBUG #define EIGEN_DBG_SPARSE(X) #else #define EIGEN_DBG_SPARSE(X) X #endif #define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase& other) \ { \ return Base::operator Op(other.derived()); \ } \ EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ { \ return Base::operator Op(other); \ } #define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ template \ EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ { \ return Base::operator Op(scalar); \ } #define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::internal::nested::type Nested; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ typedef typename Eigen::internal::traits::Index Index; \ enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ Flags = Eigen::internal::traits::Flags, \ CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ using Base::derived; \ using Base::const_cast_derived; #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) const int CoherentAccessPattern = 0x1; const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern; const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; template class SparseMatrixBase; template class SparseMatrix; template class DynamicSparseMatrix; template class SparseVector; template class MappedSparseMatrix; template class SparseTriangularView; template class SparseSelfAdjointView; template class SparseDiagonalProduct; template class SparseView; template class SparseSparseProduct; template class SparseTimeDenseProduct; template class DenseTimeSparseProduct; template class SparseDenseOuterProduct; template struct SparseSparseProductReturnType; template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct DenseSparseProductReturnType; template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct SparseDenseProductReturnType; template class SparseSymmetricPermutationProduct; namespace internal { template struct sparse_eval; template struct eval : public sparse_eval::RowsAtCompileTime,traits::ColsAtCompileTime> {}; template struct sparse_eval { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; public: typedef SparseVector<_Scalar, RowMajor, _Index> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; public: typedef SparseVector<_Scalar, ColMajor, _Index> type; }; template struct 
sparse_eval { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; enum { _Options = ((traits::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: typedef SparseMatrix<_Scalar, _Options, _Index> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; public: typedef Matrix<_Scalar, 1, 1> type; }; template struct plain_matrix_type { typedef typename traits::Scalar _Scalar; typedef typename traits::Index _Index; enum { _Options = ((traits::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: typedef SparseMatrix<_Scalar, _Options, _Index> type; }; } // end namespace internal /** \ingroup SparseCore_Module * * \class Triplet * * \brief A small structure to hold a non zero as a triplet (i,j,value). * * \sa SparseMatrix::setFromTriplets() */ template::Index > class Triplet { public: Triplet() : m_row(0), m_col(0), m_value(0) {} Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0)) : m_row(i), m_col(j), m_value(v) {} /** \returns the row index of the element */ const Index& row() const { return m_row; } /** \returns the column index of the element */ const Index& col() const { return m_col; } /** \returns the value of the element */ const Scalar& value() const { return m_value; } protected: Index m_row, m_col; Scalar m_value; }; } // end namespace Eigen #endif // EIGEN_SPARSEUTIL_H antimony/lib/fab/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h0000644000175000017500000001433313341130426026710 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H #define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H namespace Eigen { namespace internal { // perform a pseudo in-place sparse * sparse product assuming all matrices are col major template static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance) { // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); typedef typename remove_all::type::Scalar Scalar; typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer AmbiVector tempVector(rows); // estimate the number of non zero entries // given a rhs column containing Y non zeros, we assume that the respective Y columns // of the lhs differs in average of one non zeros, thus the number of non zeros for // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. 
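// (Editor's worked example of this heuristic: with square n x n operands where
//  each rhs column stores Y=4 entries and the lhs averages X=6 entries per
//  column, each result column is estimated at X+Y=10 entries, i.e. 10*n in
//  total, which is exactly nnz(lhs) + nnz(rhs) = 6*n + 4*n.)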
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); // mimics a resizeByInnerOuter: if(ResultType::IsRowMajor) res.resize(cols, rows); else res.resize(rows, cols); res.reserve(estimated_nnz_prod); double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols()); for (Index j=0; j::Iterator it(tempVector,tolerance); it; ++it) res.insertBackByOuterInner(j,it.index()) = it.value(); } res.finalize(); } template::Flags&RowMajorBit, int RhsStorageOrder = traits::Flags&RowMajorBit, int ResStorageOrder = traits::Flags&RowMajorBit> struct sparse_sparse_product_with_pruning_selector; template struct sparse_sparse_product_with_pruning_selector { typedef typename traits::type>::Scalar Scalar; typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { typename remove_all::type _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res.swap(_res); } }; template struct sparse_sparse_product_with_pruning_selector { typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { // we need a col-major matrix to hold the result typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res = _res; } }; template struct sparse_sparse_product_with_pruning_selector { typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { // let's transpose the product to get a column x column product typename remove_all::type _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(rhs, lhs, _res, tolerance); res.swap(_res); } }; template struct sparse_sparse_product_with_pruning_selector { typedef typename ResultType::RealScalar RealScalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { typedef SparseMatrix ColMajorMatrixLhs; typedef SparseMatrix ColMajorMatrixRhs; ColMajorMatrixLhs colLhs(lhs); ColMajorMatrixRhs colRhs(rhs); internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); // let's transpose the product to get a column x column product // typedef SparseMatrix SparseTemporaryType; // SparseTemporaryType _res(res.cols(), res.rows()); // sparse_sparse_product_with_pruning_impl(rhs, lhs, _res); // res = _res.transpose(); } }; // NOTE the 2 others cases (col row *) must never occur since they are caught // by ProductReturnType which transforms it to (col col *) by evaluating rhs. } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H antimony/lib/fab/vendor/Eigen/src/Householder/0000755000175000017500000000000013341130426020573 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/Householder/CMakeLists.txt0000644000175000017500000000024213341130426023331 0ustar tiagotiagoFILE(GLOB Eigen_Householder_SRCS "*.h") INSTALL(FILES ${Eigen_Householder_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Householder COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/Householder/BlockHouseholder.h0000644000175000017500000000527313341130426024207 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2010 Vincent Lejeune // Copyright (C) 2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BLOCK_HOUSEHOLDER_H #define EIGEN_BLOCK_HOUSEHOLDER_H // This file contains some helper function to deal with block householder reflectors namespace Eigen { namespace internal { /** \internal */ template void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) { typedef typename TriangularFactorType::Index Index; typedef typename VectorsType::Scalar Scalar; const Index nbVecs = vectors.cols(); eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); for(Index i = 0; i < nbVecs; i++) { Index rs = vectors.rows() - i; Scalar Vii = vectors(i,i); vectors.const_cast_derived().coeffRef(i,i) = Scalar(1); triFactor.col(i).head(i).noalias() = -hCoeffs(i) * vectors.block(i, 0, rs, i).adjoint() * vectors.col(i).tail(rs); vectors.const_cast_derived().coeffRef(i, i) = Vii; // FIXME add .noalias() once the triangular product can work inplace triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView() * triFactor.col(i).head(i); triFactor(i,i) = hCoeffs(i); } } /** \internal */ template void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs) { typedef typename MatrixType::Index Index; enum { TFactorSize = MatrixType::ColsAtCompileTime }; Index nbVecs = vectors.cols(); Matrix T(nbVecs,nbVecs); make_block_householder_triangular_factor(T, vectors, hCoeffs); const TriangularView& V(vectors); // A -= V T V^* A Matrix tmp = V.adjoint() * mat; // FIXME add .noalias() once the triangular product can work inplace tmp = T.template triangularView().adjoint() * tmp; mat.noalias() -= V * tmp; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_BLOCK_HOUSEHOLDER_H antimony/lib/fab/vendor/Eigen/src/Householder/HouseholderSequence.h0000644000175000017500000004534413341130426024730 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // Copyright (C) 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H #define EIGEN_HOUSEHOLDER_SEQUENCE_H namespace Eigen { /** \ingroup Householder_Module * \householder_module * \class HouseholderSequence * \brief Sequence of Householder reflections acting on subspaces with decreasing size * \tparam VectorsType type of matrix containing the Householder vectors * \tparam CoeffsType type of vector containing the Householder coefficients * \tparam Side either OnTheLeft (the default) or OnTheRight * * This class represents a product sequence of Householder reflections where the first Householder reflection * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but * one dimensions invariant and acts only on the last dimension. 
Such sequences of Householder reflections * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(), * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence. * * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i * v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$ * v_i \f$ is a vector of the form * \f[ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. * \f] * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector. * * Typical usages are listed below, where H is a HouseholderSequence: * \code * A.applyOnTheRight(H); // A = A * H * A.applyOnTheLeft(H); // A = H * A * A.applyOnTheRight(H.adjoint()); // A = A * H^* * A.applyOnTheLeft(H.adjoint()); // A = H^* * A * MatrixXd Q = H; // conversion to a dense matrix * \endcode * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators. * * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example. * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ namespace internal { template struct traits > { typedef typename VectorsType::Scalar Scalar; typedef typename VectorsType::Index Index; typedef typename VectorsType::StorageKind StorageKind; enum { RowsAtCompileTime = Side==OnTheLeft ? traits::RowsAtCompileTime : traits::ColsAtCompileTime, ColsAtCompileTime = RowsAtCompileTime, MaxRowsAtCompileTime = Side==OnTheLeft ? 
traits::MaxRowsAtCompileTime : traits::MaxColsAtCompileTime, MaxColsAtCompileTime = MaxRowsAtCompileTime, Flags = 0 }; }; template struct hseq_side_dependent_impl { typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; typedef typename VectorsType::Index Index; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; return Block(h.m_vectors, start, k, h.rows()-start, 1); } }; template struct hseq_side_dependent_impl { typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; typedef typename VectorsType::Index Index; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; return Block(h.m_vectors, k, start, 1, h.rows()-start).transpose(); } }; template struct matrix_type_times_scalar_type { typedef typename scalar_product_traits::ReturnType ResultScalar; typedef Matrix Type; }; } // end namespace internal template class HouseholderSequence : public EigenBase > { typedef typename internal::hseq_side_dependent_impl::EssentialVectorType EssentialVectorType; public: enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, ColsAtCompileTime = internal::traits::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; typedef typename internal::traits::Scalar Scalar; typedef typename VectorsType::Index Index; typedef HouseholderSequence< typename internal::conditional::IsComplex, typename internal::remove_all::type, VectorsType>::type, typename internal::conditional::IsComplex, typename internal::remove_all::type, CoeffsType>::type, Side > ConjugateReturnType; /** \brief Constructor. * \param[in] v %Matrix containing the essential parts of the Householder vectors * \param[in] h Vector containing the Householder coefficients * * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many * Householder reflections as there are columns. * * \note The %HouseholderSequence object stores \p v and \p h by reference. * * Example: \include HouseholderSequence_HouseholderSequence.cpp * Output: \verbinclude HouseholderSequence_HouseholderSequence.out * * \sa setLength(), setShift() */ HouseholderSequence(const VectorsType& v, const CoeffsType& h) : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()), m_shift(0) { } /** \brief Copy constructor. */ HouseholderSequence(const HouseholderSequence& other) : m_vectors(other.m_vectors), m_coeffs(other.m_coeffs), m_trans(other.m_trans), m_length(other.m_length), m_shift(other.m_shift) { } /** \brief Number of rows of transformation viewed as a matrix. * \returns Number of rows * \details This equals the dimension of the space that the transformation acts on. */ Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } /** \brief Number of columns of transformation viewed as a matrix. * \returns Number of columns * \details This equals the dimension of the space that the transformation acts on. 
*/ Index cols() const { return rows(); } /** \brief Essential part of a Householder vector. * \param[in] k Index of Householder reflection * \returns Vector containing non-trivial entries of k-th Householder vector * * This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector * \f[ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. * \f] * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v * passed to the constructor. * * \sa setShift(), shift() */ const EssentialVectorType essentialVector(Index k) const { eigen_assert(k >= 0 && k < m_length); return internal::hseq_side_dependent_impl::essentialVector(*this, k); } /** \brief %Transpose of the Householder sequence. */ HouseholderSequence transpose() const { return HouseholderSequence(*this).setTrans(!m_trans); } /** \brief Complex conjugate of the Householder sequence. */ ConjugateReturnType conjugate() const { return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate()) .setTrans(m_trans) .setLength(m_length) .setShift(m_shift); } /** \brief Adjoint (conjugate transpose) of the Householder sequence. */ ConjugateReturnType adjoint() const { return conjugate().setTrans(!m_trans); } /** \brief Inverse of the Householder sequence (equals the adjoint). */ ConjugateReturnType inverse() const { return adjoint(); } /** \internal */ template inline void evalTo(DestType& dst) const { Matrix workspace(rows()); evalTo(dst, workspace); } /** \internal */ template void evalTo(Dest& dst, Workspace& workspace) const { workspace.resize(rows()); Index vecs = m_length; if( internal::is_same::type,Dest>::value && internal::extract_data(dst) == internal::extract_data(m_vectors)) { // in-place dst.diagonal().setOnes(); dst.template triangularView().setZero(); for(Index k = vecs-1; k >= 0; --k) { Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data()); else dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data()); // clear the off diagonal vector dst.col(k).tail(rows()-k-1).setZero(); } // clear the remaining columns if needed for(Index k = 0; k= 0; --k) { Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); else dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); } } } /** \internal */ template inline void applyThisOnTheRight(Dest& dst) const { Matrix workspace(dst.rows()); applyThisOnTheRight(dst, workspace); } /** \internal */ template inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const { workspace.resize(dst.rows()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? 
m_length-k-1 : k; dst.rightCols(rows()-m_shift-actual_k) .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } /** \internal */ template inline void applyThisOnTheLeft(Dest& dst) const { Matrix workspace(dst.cols()); applyThisOnTheLeft(dst, workspace); } /** \internal */ template inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const { workspace.resize(dst.cols()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? k : m_length-k-1; dst.bottomRows(rows()-m_shift-actual_k) .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } /** \brief Computes the product of a Householder sequence with a matrix. * \param[in] other %Matrix being multiplied. * \returns Expression object representing the product. * * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this * and \f$ M \f$ is the matrix \p other. */ template typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other) const { typename internal::matrix_type_times_scalar_type::Type res(other.template cast::ResultScalar>()); applyThisOnTheLeft(res); return res; } template friend struct internal::hseq_side_dependent_impl; /** \brief Sets the length of the Householder sequence. * \param [in] length New value for the length. * * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that * is smaller. After this function is called, the length equals \p length. * * \sa length() */ HouseholderSequence& setLength(Index length) { m_length = length; return *this; } /** \brief Sets the shift of the Householder sequence. * \param [in] shift New value for the shift. * * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th * column of the matrix \p v passed to the constructor corresponds to the i-th Householder * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}} * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th * Householder reflection. * * \sa shift() */ HouseholderSequence& setShift(Index shift) { m_shift = shift; return *this; } Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */ Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */ /* Necessary for .adjoint() and .conjugate() */ template friend class HouseholderSequence; protected: /** \brief Sets the transpose flag. * \param [in] trans New value of the transpose flag. * * By default, the transpose flag is not set. If the transpose flag is set, then this object represents * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$. * * \sa trans() */ HouseholderSequence& setTrans(bool trans) { m_trans = trans; return *this; } bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */ typename VectorsType::Nested m_vectors; typename CoeffsType::Nested m_coeffs; bool m_trans; Index m_length; Index m_shift; }; /** \brief Computes the product of a matrix with a Householder sequence. * \param[in] other %Matrix being multiplied. * \param[in] h %HouseholderSequence being multiplied. * \returns Expression object representing the product. 
* * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the * Householder sequence represented by \p h. */ template typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other, const HouseholderSequence& h) { typename internal::matrix_type_times_scalar_type::Type res(other.template cast::ResultScalar>()); h.applyThisOnTheRight(res); return res; } /** \ingroup Householder_Module \householder_module * \brief Convenience function for constructing a Householder sequence. * \returns A HouseholderSequence constructed from the specified arguments. */ template HouseholderSequence householderSequence(const VectorsType& v, const CoeffsType& h) { return HouseholderSequence(v, h); } /** \ingroup Householder_Module \householder_module * \brief Convenience function for constructing a Householder sequence. * \returns A HouseholderSequence constructed from the specified arguments. * \details This function differs from householderSequence() in that the template argument \p OnTheSide of * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft. */ template HouseholderSequence rightHouseholderSequence(const VectorsType& v, const CoeffsType& h) { return HouseholderSequence(v, h); } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_SEQUENCE_H antimony/lib/fab/vendor/Eigen/src/Householder/Householder.h0000644000175000017500000001217613341130426023234 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_H #define EIGEN_HOUSEHOLDER_H namespace Eigen { namespace internal { template struct decrement_size { enum { ret = n==Dynamic ? n : n-1 }; }; } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * The essential part of the vector \c v is stored in *this. * * On output: * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template void MatrixBase::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) { VectorBlock::ret> essentialPart(derived(), 1, size()-1); makeHouseholder(essentialPart, tau, beta); } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * On output: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template template void MatrixBase::makeHouseholder( EssentialPart& essential, Scalar& tau, RealScalar& beta) const { using std::sqrt; using numext::conj; EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) VectorBlock tail(derived(), 1, size()-1); RealScalar tailSqNorm = size()==1 ? 
RealScalar(0) : tail.squaredNorm(); Scalar c0 = coeff(0); if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0)) { tau = RealScalar(0); beta = numext::real(c0); essential.setZero(); } else { beta = sqrt(numext::abs2(c0) + tailSqNorm); if (numext::real(c0)>=RealScalar(0)) beta = -beta; essential = tail / (c0 - beta); tau = conj((beta - c0) / beta); } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the left to a vector or matrix. * * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheRight() */ template template void MatrixBase::applyHouseholderOnTheLeft( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(rows() == 1) { *this *= Scalar(1)-tau; } else { Map::type> tmp(workspace,cols()); Block bottom(derived(), 1, 0, rows()-1, cols()); tmp.noalias() = essential.adjoint() * bottom; tmp += this->row(0); this->row(0) -= tau * tmp; bottom.noalias() -= tau * essential * tmp; } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the right to a vector or matrix. * * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheLeft() */ template template void MatrixBase::applyHouseholderOnTheRight( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(cols() == 1) { *this *= Scalar(1)-tau; } else { Map::type> tmp(workspace,rows()); Block right(derived(), 0, 1, rows(), cols()-1); tmp.noalias() = right * essential.conjugate(); tmp += this->col(0); this->col(0) -= tau * tmp; right.noalias() -= tau * tmp * essential.transpose(); } } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_H antimony/lib/fab/vendor/Eigen/src/SparseLU/0000755000175000017500000000000013341130426020010 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/SparseLU/CMakeLists.txt0000644000175000017500000000023113341130426022544 0ustar tiagotiagoFILE(GLOB Eigen_SparseLU_SRCS "*.h") INSTALL(FILES ${Eigen_SparseLU_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseLU COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h0000644000175000017500000001307213341130426024223 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef SPARSELU_KERNEL_BMOD_H #define SPARSELU_KERNEL_BMOD_H namespace Eigen { namespace internal { /** * \brief Performs numeric block updates from a given supernode to a single column * * \param segsize Size of the segment (and blocks ) to use for updates * \param[in,out] dense Packed values of the original matrix * \param tempv temporary vector to use for updates * \param lusup array containing the supernodes * \param lda Leading dimension in the supernode * \param nrow Number of rows in the rectangular part of the supernode * \param lsub compressed row subscripts of supernodes * \param lptr pointer to the first column of the current supernode in lsub * \param no_zeros Number of nonzeros elements before the diagonal part of the supernode * \return 0 on success */ template struct LU_kernel_bmod { template static EIGEN_DONT_INLINE void run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); }; template template EIGEN_DONT_INLINE void LU_kernel_bmod::run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros) { typedef typename ScalarVector::Scalar Scalar; // First, copy U[*,j] segment from dense(*) to tempv(*) // The result of triangular solve is in tempv[*]; // The result of matric-vector update is in dense[*] Index isub = lptr + no_zeros; int i; Index irow; for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++) { irow = lsub(isub); tempv(i) = dense(irow); ++isub; } // Dense triangular solve -- start effective triangle luptr += lda * no_zeros + no_zeros; // Form Eigen matrix and vector Map, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) ); Map > u(tempv.data(), segsize); u = A.template triangularView().solve(u); // Dense matrix-vector product y <-- B*x luptr += segsize; const Index PacketSize = internal::packet_traits::size; Index ldl = internal::first_multiple(nrow, PacketSize); Map, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) ); Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize); Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize; Map, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) ); l.setZero(); internal::sparselu_gemm(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride()); // Scatter tempv[] into SPA dense[] as a temporary storage isub = lptr + no_zeros; for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++) { irow = lsub(isub++); dense(irow) = tempv(i); } // Scatter l into SPA dense[] for (i = 0; i < nrow; i++) { irow = lsub(isub++); dense(irow) -= l(i); } } template <> struct LU_kernel_bmod<1> { template static EIGEN_DONT_INLINE void run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); }; template EIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index 
no_zeros) { typedef typename ScalarVector::Scalar Scalar; Scalar f = dense(lsub(lptr + no_zeros)); luptr += lda * no_zeros + no_zeros + 1; const Scalar* a(lusup.data() + luptr); const /*typename IndexVector::Scalar*/Index* irow(lsub.data()+lptr + no_zeros + 1); Index i = 0; for (; i+1 < nrow; i+=2) { Index i0 = *(irow++); Index i1 = *(irow++); Scalar a0 = *(a++); Scalar a1 = *(a++); Scalar d0 = dense.coeff(i0); Scalar d1 = dense.coeff(i1); d0 -= f*a0; d1 -= f*a1; dense.coeffRef(i0) = d0; dense.coeffRef(i1) = d1; } if(i // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H #define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H namespace Eigen { namespace internal { /** \ingroup SparseLU_Module * \brief a class to manipulate the L supernodal factor from the SparseLU factorization * * This class contain the data to easily store * and manipulate the supernodes during the factorization and solution phase of Sparse LU. * Only the lower triangular matrix has supernodes. * * NOTE : This class corresponds to the SCformat structure in SuperLU * */ /* TODO * InnerIterator as for sparsematrix * SuperInnerIterator to iterate through all supernodes * Function for triangular solve */ template class MappedSuperNodalMatrix { public: typedef _Scalar Scalar; typedef _Index Index; typedef Matrix IndexVector; typedef Matrix ScalarVector; public: MappedSuperNodalMatrix() { } MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col); } ~MappedSuperNodalMatrix() { } /** * Set appropriate pointers for the lower triangular supernodal matrix * These infos are available at the end of the numerical factorization * FIXME This class will be modified such that it can be use in the course * of the factorization. 
*/ void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { m_row = m; m_col = n; m_nzval = nzval.data(); m_nzval_colptr = nzval_colptr.data(); m_rowind = rowind.data(); m_rowind_colptr = rowind_colptr.data(); m_nsuper = col_to_sup(n); m_col_to_sup = col_to_sup.data(); m_sup_to_col = sup_to_col.data(); } /** * Number of rows */ Index rows() { return m_row; } /** * Number of columns */ Index cols() { return m_col; } /** * Return the array of nonzero values packed by column * * The size is nnz */ Scalar* valuePtr() { return m_nzval; } const Scalar* valuePtr() const { return m_nzval; } /** * Return the pointers to the beginning of each column in \ref valuePtr() */ Index* colIndexPtr() { return m_nzval_colptr; } const Index* colIndexPtr() const { return m_nzval_colptr; } /** * Return the array of compressed row indices of all supernodes */ Index* rowIndex() { return m_rowind; } const Index* rowIndex() const { return m_rowind; } /** * Return the location in \em rowvaluePtr() which starts each column */ Index* rowIndexPtr() { return m_rowind_colptr; } const Index* rowIndexPtr() const { return m_rowind_colptr; } /** * Return the array of column-to-supernode mapping */ Index* colToSup() { return m_col_to_sup; } const Index* colToSup() const { return m_col_to_sup; } /** * Return the array of supernode-to-column mapping */ Index* supToCol() { return m_sup_to_col; } const Index* supToCol() const { return m_sup_to_col; } /** * Return the number of supernodes */ Index nsuper() const { return m_nsuper; } class InnerIterator; template void solveInPlace( MatrixBase&X) const; protected: Index m_row; // Number of rows Index m_col; // Number of columns Index m_nsuper; // Number of supernodes Scalar* m_nzval; //array of nonzero values packed by column Index* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j Index* m_rowind; // Array of compressed row indices of rectangular supernodes Index* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j Index* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs Index* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode private : }; /** * \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L * */ template class MappedSuperNodalMatrix::InnerIterator { public: InnerIterator(const MappedSuperNodalMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_supno(mat.colToSup()[outer]), m_idval(mat.colIndexPtr()[outer]), m_startidval(m_idval), m_endidval(mat.colIndexPtr()[outer+1]), m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]), m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1]) {} inline InnerIterator& operator++() { m_idval++; m_idrow++; return *this; } inline Scalar value() const { return m_matrix.valuePtr()[m_idval]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_idval]); } inline Index index() const { return m_matrix.rowIndex()[m_idrow]; } inline Index row() const { return index(); } inline Index col() const { return m_outer; } inline Index supIndex() const { return m_supno; } inline operator bool() const { return ( (m_idval < m_endidval) && (m_idval >= m_startidval) && (m_idrow < m_endidrow) ); } protected: const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix const Index 
m_outer; // Current column const Index m_supno; // Current SuperNode number Index m_idval; // Index to browse the values in the current column const Index m_startidval; // Start of the column value const Index m_endidval; // End of the column value Index m_idrow; // Index to browse the row indices Index m_endidrow; // End index of row indices of the current column }; /** * \brief Solve with the supernode triangular matrix * */ template template void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) const { Index n = X.rows(); Index nrhs = X.cols(); const Scalar * Lval = valuePtr(); // Nonzero values Matrix work(n, nrhs); // working vector work.setZero(); for (Index k = 0; k <= nsuper(); k ++) { Index fsupc = supToCol()[k]; // First column of the current supernode Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column Index nsupr = rowIndexPtr()[fsupc+1] - istart; // Number of rows in the current supernode Index nsupc = supToCol()[k+1] - fsupc; // Number of columns in the current supernode Index nrow = nsupr - nsupc; // Number of rows in the non-diagonal part of the supernode Index irow; //Current index row if (nsupc == 1 ) { for (Index j = 0; j < nrhs; j++) { InnerIterator it(*this, fsupc); ++it; // Skip the diagonal element for (; it; ++it) { irow = it.row(); X(irow, j) -= X(fsupc, j) * it.value(); } } } else { // The supernode has more than one column Index luptr = colIndexPtr()[fsupc]; Index lda = colIndexPtr()[fsupc+1] - luptr; // Triangular solve Map, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) ); Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); U = A.template triangularView().solve(U); // Matrix-vector product new (&A) Map, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); work.block(0, 0, nrow, nrhs) = A * U; //Begin Scatter for (Index j = 0; j < nrhs; j++) { Index iptr = istart + nsupc; for (Index i = 0; i < nrow; i++) { irow = rowIndex()[iptr]; X(irow, j) -= work(i, j); // Scatter operation work(i, j) = Scalar(0); iptr++; } } } } } } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSELU_MATRIX_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h0000644000175000017500000002346013341130426024231 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSELU_GEMM_KERNEL_H #define EIGEN_SPARSELU_GEMM_KERNEL_H namespace Eigen { namespace internal { /** \internal * A general matrix-matrix product kernel optimized for the SparseLU factorization. * - A, B, and C must be column major * - lda and ldc must be multiples of the respective packet size * - C must have the same alignment as A */ template EIGEN_DONT_INLINE void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc) { using namespace Eigen::internal; typedef typename packet_traits::type Packet; enum { NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, PacketSize = packet_traits::size, PM = 8, // peeling in M RN = 2, // register blocking RK = NumberOfRegisters>=16 ? 
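/* with at least 16 vector registers (e.g. x86-64 with AVX) the kernel keeps
   four columns of A in flight per update; on register-starved targets it
   falls back to two */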
4 : 2, // register blocking BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk SM = PM*PacketSize // step along M }; Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once Index i0 = internal::first_aligned(A,m); eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_aligned(C,m))); // handle the non aligned rows of A and C without any optimization: for(Index i=0; i(BM, m-ib); // actual number of rows Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization // Let's process two columns of B-C at once for(Index j=0; j(Bc0[0]); b10 = pset1(Bc0[1]); if(RK==4) b20 = pset1(Bc0[2]); if(RK==4) b30 = pset1(Bc0[3]); b01 = pset1(Bc1[0]); b11 = pset1(Bc1[1]); if(RK==4) b21 = pset1(Bc1[2]); if(RK==4) b31 = pset1(Bc1[3]); Packet a0, a1, a2, a3, c0, c1, t0, t1; const Scalar* A0 = A+ib+(k+0)*lda; const Scalar* A1 = A+ib+(k+1)*lda; const Scalar* A2 = A+ib+(k+2)*lda; const Scalar* A3 = A+ib+(k+3)*lda; Scalar* C0 = C+ib+(j+0)*ldc; Scalar* C1 = C+ib+(j+1)*ldc; a0 = pload(A0); a1 = pload(A1); if(RK==4) { a2 = pload(A2); a3 = pload(A3); } else { // workaround "may be used uninitialized in this function" warning a2 = a3 = a0; } #define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);} #define WORK(I) \ c0 = pload(C0+i+(I)*PacketSize); \ c1 = pload(C1+i+(I)*PacketSize); \ KMADD(c0, a0, b00, t0) \ KMADD(c1, a0, b01, t1) \ a0 = pload(A0+i+(I+1)*PacketSize); \ KMADD(c0, a1, b10, t0) \ KMADD(c1, a1, b11, t1) \ a1 = pload(A1+i+(I+1)*PacketSize); \ if(RK==4) KMADD(c0, a2, b20, t0) \ if(RK==4) KMADD(c1, a2, b21, t1) \ if(RK==4) a2 = pload(A2+i+(I+1)*PacketSize); \ if(RK==4) KMADD(c0, a3, b30, t0) \ if(RK==4) KMADD(c1, a3, b31, t1) \ if(RK==4) a3 = pload(A3+i+(I+1)*PacketSize); \ pstore(C0+i+(I)*PacketSize, c0); \ pstore(C1+i+(I)*PacketSize, c1) // process rows of A' - C' with aggressive vectorization and peeling for(Index i=0; i0) { const Scalar* Bc0 = B+(n-1)*ldb; for(Index k=0; k(Bc0[0]); b10 = pset1(Bc0[1]); if(RK==4) b20 = pset1(Bc0[2]); if(RK==4) b30 = pset1(Bc0[3]); Packet a0, a1, a2, a3, c0, t0/*, t1*/; const Scalar* A0 = A+ib+(k+0)*lda; const Scalar* A1 = A+ib+(k+1)*lda; const Scalar* A2 = A+ib+(k+2)*lda; const Scalar* A3 = A+ib+(k+3)*lda; Scalar* C0 = C+ib+(n_end)*ldc; a0 = pload(A0); a1 = pload(A1); if(RK==4) { a2 = pload(A2); a3 = pload(A3); } else { // workaround "may be used uninitialized in this function" warning a2 = a3 = a0; } #define WORK(I) \ c0 = pload(C0+i+(I)*PacketSize); \ KMADD(c0, a0, b00, t0) \ a0 = pload(A0+i+(I+1)*PacketSize); \ KMADD(c0, a1, b10, t0) \ a1 = pload(A1+i+(I+1)*PacketSize); \ if(RK==4) KMADD(c0, a2, b20, t0) \ if(RK==4) a2 = pload(A2+i+(I+1)*PacketSize); \ if(RK==4) KMADD(c0, a3, b30, t0) \ if(RK==4) a3 = pload(A3+i+(I+1)*PacketSize); \ pstore(C0+i+(I)*PacketSize, c0); // agressive vectorization and peeling for(Index i=0; i0) { for(Index j=0; j1 ? 
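/* the leftover columns of A (d_end..d) are applied below as plain Map-based
   axpy-style updates; aligned loads are only assumed when real SIMD packets
   are in use */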
Aligned : 0 }; typedef Map, Alignment > MapVector; typedef Map, Alignment > ConstMapVector; if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b); else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b) + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b); else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b) + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b) + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b); } } } // blocking on the rows of A and C } #undef KMADD } // namespace internal } // namespace Eigen #endif // EIGEN_SPARSELU_GEMM_KERNEL_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h0000644000175000017500000001426113341130426024074 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of [s,d,c,z]column_dfs.c file in SuperLU * -- SuperLU routine (version 2.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * November 15, 1997 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_COLUMN_DFS_H #define SPARSELU_COLUMN_DFS_H template class SparseLUImpl; namespace Eigen { namespace internal { template struct column_dfs_traits : no_assignment_operator { typedef typename ScalarVector::Scalar Scalar; typedef typename IndexVector::Scalar Index; column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl::GlobalLU_t& glu, SparseLUImpl& luImpl) : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl) {} bool update_segrep(Index /*krep*/, Index /*jj*/) { return true; } void mem_expand(IndexVector& lsub, Index& nextl, Index chmark) { if (nextl >= m_glu.nzlmax) m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions); if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU; } enum { ExpandMem = true }; Index m_jcol; Index& m_jsuper_ref; typename SparseLUImpl::GlobalLU_t& m_glu; SparseLUImpl& m_luImpl; }; /** * \brief Performs a symbolic factorization on column jcol and decide the supernode boundary * * A supernode representative is the last column of a supernode. * The nonzeros in U[*,j] are segments that end at supernodes representatives. * The routine returns a list of the supernodal representatives * in topological order of the dfs that generates them. * The location of the first nonzero in each supernodal segment * (supernodal entry location) is also returned. 
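*
* (One call therefore both grows the row structure of L(:,jcol) and decides,
* via the T2 supernode test further down, whether jcol can join the supernode
* of column jcol-1 or must start a new one.)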
* * \param m number of rows in the matrix * \param jcol Current column * \param perm_r Row permutation * \param maxsuper Maximum number of column allowed in a supernode * \param [in,out] nseg Number of segments in current U[*,j] - new segments appended * \param lsub_col defines the rhs vector to start the dfs * \param [in,out] segrep Segment representatives - new segments appended * \param repfnz First nonzero location in each row * \param xprune * \param marker marker[i] == jj, if i was visited during dfs of current column jj; * \param parent * \param xplore working array * \param glu global LU data * \return 0 success * > 0 number of bytes allocated when run out of space * */ template Index SparseLUImpl::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) { Index jsuper = glu.supno(jcol); Index nextl = glu.xlsub(jcol); VectorBlock marker2(marker, 2*m, m); column_dfs_traits traits(jcol, jsuper, glu, *this); // For each nonzero in A(*,jcol) do dfs for (Index k = 0; ((k < m) ? lsub_col[k] != emptyIdxLU : false) ; k++) { Index krow = lsub_col(k); lsub_col(k) = emptyIdxLU; Index kmark = marker2(krow); // krow was visited before, go to the next nonz; if (kmark == jcol) continue; dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent, xplore, glu, nextl, krow, traits); } // for each nonzero ... Index fsupc, jptr, jm1ptr, ito, ifrom, istop; Index nsuper = glu.supno(jcol); Index jcolp1 = jcol + 1; Index jcolm1 = jcol - 1; // check to see if j belongs in the same supernode as j-1 if ( jcol == 0 ) { // Do nothing for column 0 nsuper = glu.supno(0) = 0 ; } else { fsupc = glu.xsup(nsuper); jptr = glu.xlsub(jcol); // Not yet compressed jm1ptr = glu.xlsub(jcolm1); // Use supernodes of type T2 : see SuperLU paper if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU; // Make sure the number of columns in a supernode doesn't // exceed threshold if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU; /* If jcol starts a new supernode, reclaim storage space in * glu.lsub from previous supernode. Note we only store * the subscript set of the first and last columns of * a supernode. (first for num values, last for pruning) */ if (jsuper == emptyIdxLU) { // starts a new supernode if ( (fsupc < jcolm1-1) ) { // >= 3 columns in nsuper ito = glu.xlsub(fsupc+1); glu.xlsub(jcolm1) = ito; istop = ito + jptr - jm1ptr; xprune(jcolm1) = istop; // intialize xprune(jcol-1) glu.xlsub(jcol) = istop; for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) glu.lsub(ito) = glu.lsub(ifrom); nextl = ito; // = istop + length(jcol) } nsuper++; glu.supno(jcol) = nsuper; } // if a new supernode } // end else: jcol > 0 // Tidy up the pointers before exit glu.xsup(nsuper+1) = jcolp1; glu.supno(jcolp1) = nsuper; xprune(jcol) = nextl; // Intialize upper bound for pruning glu.xlsub(jcolp1) = nextl; return 0; } } // end namespace internal } // end namespace Eigen #endif antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h0000644000175000017500000001120713341130426023215 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of xpivotL.c file in SuperLU * -- SuperLU routine (version 3.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * October 15, 2003 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_PIVOTL_H #define SPARSELU_PIVOTL_H namespace Eigen { namespace internal { /** * \brief Performs the numerical pivoting on the current column of L, and the CDIV operation. * * Pivot policy : * (1) Compute thresh = u * max_(i>=j) abs(A_ij); * (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN * pivot row = k; * ELSE IF abs(A_jj) >= thresh THEN * pivot row = j; * ELSE * pivot row = m; * * Note: If you absolutely want to use a given pivot order, then set u=0.0. * * \param jcol The current column of L * \param diagpivotthresh diagonal pivoting threshold * \param[in,out] perm_r Row permutation (threshold pivoting) * \param[in] iperm_c column permutation - used to find the diagonal of Pc*A*Pc' * \param[out] pivrow The pivot row * \param glu Global LU data * \return 0 if success, i > 0 if U(i,i) is exactly zero * */ template <typename Scalar, typename Index> Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu) { Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0 Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode Index* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode // Determine the largest abs numerical value for partial pivoting Index diagind = iperm_c(jcol); // diagonal index RealScalar pivmax = 0.0; Index pivptr = nsupc; Index diag = emptyIdxLU; RealScalar rtemp; Index isub, icol, itemp, k; for (isub = nsupc; isub < nsupr; ++isub) { rtemp = std::abs(lu_col_ptr[isub]); if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; } if (lsub_ptr[isub] == diagind) diag = isub; } // Test for singularity if ( pivmax == 0.0 ) { pivrow = lsub_ptr[pivptr]; perm_r(pivrow) = jcol; return (jcol+1); } RealScalar thresh = diagpivotthresh * pivmax; // Choose appropriate pivotal element { // Test if the diagonal element can be used as a pivot (given the threshold value) if (diag >= 0 ) { // Diagonal element exists rtemp = std::abs(lu_col_ptr[diag]); if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag; } pivrow = lsub_ptr[pivptr]; } // Record pivot row perm_r(pivrow) = jcol; // Interchange row
subscripts if (pivptr != nsupc ) { std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] ); // Interchange numerical values as well, for the two rows in the whole snode // such that L is indexed the same way as A for (icol = 0; icol <= nsupc; icol++) { itemp = pivptr + icol * lda; std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]); } } // cdiv operations Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc]; for (k = nsupc+1; k < nsupr; k++) lu_col_ptr[k] *= temp; return 0; } } // end namespace internal } // end namespace Eigen #endif // SPARSELU_PIVOTL_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h0000644000175000017500000002062113341130426024040 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of [s,d,c,z]panel_bmod.c file in SuperLU * -- SuperLU routine (version 3.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * October 15, 2003 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_PANEL_BMOD_H #define SPARSELU_PANEL_BMOD_H namespace Eigen { namespace internal { /** * \brief Performs numeric block updates (sup-panel) in topological order. * * Before entering this routine, the original nonzeros in the panel * were already copied i nto the spa[m,w] * * \param m number of rows in the matrix * \param w Panel size * \param jcol Starting column of the panel * \param nseg Number of segments in the U part * \param dense Store the full representation of the panel * \param tempv working array * \param segrep segment representative... first row in the segment * \param repfnz First nonzero rows * \param glu Global LU data. * * */ template void SparseLUImpl::panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu) { Index ksub,jj,nextl_col; Index fsupc, nsupc, nsupr, nrow; Index krep, kfnz; Index lptr; // points to the row subscripts of a supernode Index luptr; // ... 
Index segsize,no_zeros ; // For each nonz supernode segment of U[*,j] in topological order Index k = nseg - 1; const Index PacketSize = internal::packet_traits::size; for (ksub = 0; ksub < nseg; ksub++) { // For each updating supernode /* krep = representative of current k-th supernode * fsupc = first supernodal column * nsupc = number of columns in a supernode * nsupr = number of rows in a supernode */ krep = segrep(k); k--; fsupc = glu.xsup(glu.supno(krep)); nsupc = krep - fsupc + 1; nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); nrow = nsupr - nsupc; lptr = glu.xlsub(fsupc); // loop over the panel columns to detect the actual number of columns and rows Index u_rows = 0; Index u_cols = 0; for (jj = jcol; jj < jcol + w; jj++) { nextl_col = (jj-jcol) * m; VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row kfnz = repfnz_col(krep); if ( kfnz == emptyIdxLU ) continue; // skip any zero segment segsize = krep - kfnz + 1; u_cols++; u_rows = (std::max)(segsize,u_rows); } if(nsupc >= 2) { Index ldu = internal::first_multiple(u_rows, PacketSize); Map, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu)); // gather U Index u_col = 0; for (jj = jcol; jj < jcol + w; jj++) { nextl_col = (jj-jcol) * m; VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row VectorBlock dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here kfnz = repfnz_col(krep); if ( kfnz == emptyIdxLU ) continue; // skip any zero segment segsize = krep - kfnz + 1; luptr = glu.xlusup(fsupc); no_zeros = kfnz - fsupc; Index isub = lptr + no_zeros; Index off = u_rows-segsize; for (Index i = 0; i < off; i++) U(i,u_col) = 0; for (Index i = 0; i < segsize; i++) { Index irow = glu.lsub(isub); U(i+off,u_col) = dense_col(irow); ++isub; } u_col++; } // solve U = A^-1 U luptr = glu.xlusup(fsupc); Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); no_zeros = (krep - u_rows + 1) - fsupc; luptr += lda * no_zeros + no_zeros; Map, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) ); U = A.template triangularView().solve(U); // update luptr += u_rows; Map, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) ); eigen_assert(tempv.size()>w*ldu + nrow*w + 1); Index ldl = internal::first_multiple(nrow, PacketSize); Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize; Map, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl)); L.setZero(); internal::sparselu_gemm(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride()); // scatter U and L u_col = 0; for (jj = jcol; jj < jcol + w; jj++) { nextl_col = (jj-jcol) * m; VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row VectorBlock dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here kfnz = repfnz_col(krep); if ( kfnz == emptyIdxLU ) continue; // skip any zero segment segsize = krep - kfnz + 1; no_zeros = kfnz - fsupc; Index isub = lptr + no_zeros; Index off = u_rows-segsize; for (Index i = 0; i < segsize; i++) { Index irow = glu.lsub(isub++); dense_col(irow) = U.coeff(i+off,u_col); U.coeffRef(i+off,u_col) = 0; } // Scatter l into SPA dense[] for (Index i = 0; i < nrow; i++) { Index irow = glu.lsub(isub++); dense_col(irow) -= L.coeff(i,u_col); L.coeffRef(i,u_col) = 0; } u_col++; } } else // level 2 only { // Sequence through each column in the panel 
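/* level-2 path: the updating supernode has a single column, so the BLAS-3
   gemm route above is skipped and each panel column is updated with the
   small unrolled LU_kernel_bmod kernels chosen by segment size below */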
for (jj = jcol; jj < jcol + w; jj++) { nextl_col = (jj-jcol) * m; VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row VectorBlock dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here kfnz = repfnz_col(krep); if ( kfnz == emptyIdxLU ) continue; // skip any zero segment segsize = krep - kfnz + 1; luptr = glu.xlusup(fsupc); Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr // Perform a trianglar solve and block update, // then scatter the result of sup-col update to dense[] no_zeros = kfnz - fsupc; if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); else if(segsize==2) LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); else if(segsize==3) LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); else LU_kernel_bmod::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); } // End for each column in the panel } } // End for each updating supernode } // end panel bmod } // end namespace internal } // end namespace Eigen #endif // SPARSELU_PANEL_BMOD_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h0000644000175000017500000001056613341130426023214 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of [s,d,c,z]pruneL.c file in SuperLU * -- SuperLU routine (version 2.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * November 15, 1997 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_PRUNEL_H #define SPARSELU_PRUNEL_H namespace Eigen { namespace internal { /** * \brief Prunes the L-structure. 
* * It prunes the L-structure of supernodes whose L-structure contains the current pivot row "pivrow" * * * \param jcol The current column of L * \param[in] perm_r Row permutation * \param[out] pivrow The pivot row * \param nseg Number of segments * \param segrep * \param repfnz * \param[out] xprune * \param glu Global LU data * */ template void SparseLUImpl::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu) { // For each supernode-rep irep in U(*,j] Index jsupno = glu.supno(jcol); Index i,irep,irep1; bool movnum, do_prune = false; Index kmin = 0, kmax = 0, minloc, maxloc,krow; for (i = 0; i < nseg; i++) { irep = segrep(i); irep1 = irep + 1; do_prune = false; // Don't prune with a zero U-segment if (repfnz(irep) == emptyIdxLU) continue; // If a snode overlaps with the next panel, then the U-segment // is fragmented into two parts -- irep and irep1. We should let // pruning occur at the rep-column in irep1s snode. if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune // If it has not been pruned & it has a nonz in row L(pivrow,i) if (glu.supno(irep) != jsupno ) { if ( xprune (irep) >= glu.xlsub(irep1) ) { kmin = glu.xlsub(irep); kmax = glu.xlsub(irep1) - 1; for (krow = kmin; krow <= kmax; krow++) { if (glu.lsub(krow) == pivrow) { do_prune = true; break; } } } if (do_prune) { // do a quicksort-type partition // movnum=true means that the num values have to be exchanged movnum = false; if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1 movnum = true; while (kmin <= kmax) { if (perm_r(glu.lsub(kmax)) == emptyIdxLU) kmax--; else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU) kmin++; else { // kmin below pivrow (not yet pivoted), and kmax // above pivrow: interchange the two suscripts std::swap(glu.lsub(kmin), glu.lsub(kmax)); // If the supernode has only one column, then we // only keep one set of subscripts. For any subscript // intercnahge performed, similar interchange must be // done on the numerical values. if (movnum) { minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) ); maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) ); std::swap(glu.lusup(minloc), glu.lusup(maxloc)); } kmin++; kmax--; } } // end while xprune(irep) = kmin; //Pruning } // end if do_prune } // end pruning } // End for each U-segment } } // end namespace internal } // end namespace Eigen #endif // SPARSELU_PRUNEL_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_Utils.h0000644000175000017500000000373613341130426023050 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
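/* A worked example for countnz() below, as a sketch: within one supernode the
   stored row set shrinks by one per column, so a 3-column supernode whose
   first column carries 5 stored rows contributes 5+4+3 = 12 entries to nnzL,
   while its triangular U part contributes 1+2+3 = 6 entries to nnzU. */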
#ifndef EIGEN_SPARSELU_UTILS_H #define EIGEN_SPARSELU_UTILS_H namespace Eigen { namespace internal { /** * \brief Count Nonzero elements in the factors */ template <typename Scalar, typename Index> void SparseLUImpl<Scalar,Index>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu) { nnzL = 0; nnzU = (glu.xusub)(n); Index nsuper = (glu.supno)(n); Index jlen; Index i, j, fsupc; if (n <= 0 ) return; // For each supernode for (i = 0; i <= nsuper; i++) { fsupc = glu.xsup(i); jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); for (j = fsupc; j < glu.xsup(i+1); j++) { nnzL += jlen; nnzU += j - fsupc + 1; jlen--; } } } /** * \brief Fix up the data storage lsub for L-subscripts. * * It removes the subscript sets for structural pruning, * and applies permutation to the remaining subscripts * */ template <typename Scalar, typename Index> void SparseLUImpl<Scalar,Index>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu) { Index fsupc, i, j, k, jstart; Index nextl = 0; Index nsuper = (glu.supno)(n); // For each supernode for (i = 0; i <= nsuper; i++) { fsupc = glu.xsup(i); jstart = glu.xlsub(fsupc); glu.xlsub(fsupc) = nextl; for (j = jstart; j < glu.xlsub(fsupc + 1); j++) { glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A nextl++; } for (k = fsupc+1; k < glu.xsup(i+1); k++) glu.xlsub(k) = nextl; // other columns in supernode i } glu.xlsub(n) = nextl; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSELU_UTILS_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h0000644000175000017500000001001113341130426025230 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* This file is a modified version of heap_relax_snode.c file in SuperLU * -- SuperLU routine (version 3.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * October 15, 2003 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_HEAP_RELAX_SNODE_H #define SPARSELU_HEAP_RELAX_SNODE_H namespace Eigen { namespace internal { /** * \brief Identify the initial relaxed supernodes * * This routine is applied to a symmetric elimination tree. 
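* (A relaxed supernode is a small subtree at the bottom of the etree, with at
* most relax_columns nodes, that is treated as one supernode even though its
* columns need not share an identical row pattern.)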
* It assumes that the matrix has been reordered according to the postorder of the etree * \param n The number of columns * \param et elimination tree * \param relax_columns Maximum number of columns allowed in a relaxed snode * \param descendants Number of descendants of each node in the etree * \param relax_end last column in a supernode */ template void SparseLUImpl::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) { // The etree may not be postordered, but its heap ordered IndexVector post; internal::treePostorder(n, et, post); // Post order etree IndexVector inv_post(n+1); Index i; for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()??? // Renumber etree in postorder IndexVector iwork(n); IndexVector et_save(n+1); for (i = 0; i < n; ++i) { iwork(post(i)) = post(et(i)); } et_save = et; // Save the original etree et = iwork; // compute the number of descendants of each node in the etree relax_end.setConstant(emptyIdxLU); Index j, parent; descendants.setZero(); for (j = 0; j < n; j++) { parent = et(j); if (parent != n) // not the dummy root descendants(parent) += descendants(j) + 1; } // Identify the relaxed supernodes by postorder traversal of the etree Index snode_start; // beginning of a snode Index k; Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree Index nsuper_et = 0; // Number of relaxed snodes in the original etree Index l; for (j = 0; j < n; ) { parent = et(j); snode_start = j; while ( parent != n && descendants(parent) < relax_columns ) { j = parent; parent = et(j); } // Found a supernode in postordered etree, j is the last column ++nsuper_et_post; k = n; for (i = snode_start; i <= j; ++i) k = (std::min)(k, inv_post(i)); l = inv_post(j); if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode { // This is also a supernode in the original etree relax_end(k) = l; // Record last column ++nsuper_et; } else { for (i = snode_start; i <= j; ++i) { l = inv_post(i); if (descendants(i) == 0) { relax_end(l) = l; ++nsuper_et; } } } j++; // Search for a new leaf while (descendants(j) != 0 && j < n) j++; } // End postorder traversal of the etree // Recover the original etree et = et_save; } } // end namespace internal } // end namespace Eigen #endif // SPARSELU_HEAP_RELAX_SNODE_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h0000644000175000017500000002121613341130426023674 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of [s,d,c,z]panel_dfs.c file in SuperLU * -- SuperLU routine (version 2.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * November 15, 1997 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. 
* Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_PANEL_DFS_H #define SPARSELU_PANEL_DFS_H namespace Eigen { namespace internal { template struct panel_dfs_traits { typedef typename IndexVector::Scalar Index; panel_dfs_traits(Index jcol, Index* marker) : m_jcol(jcol), m_marker(marker) {} bool update_segrep(Index krep, Index jj) { if(m_marker[krep] template void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& perm_r, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits ) { Index kmark = marker(krow); // For each unmarked krow of jj marker(krow) = jj; Index kperm = perm_r(krow); if (kperm == emptyIdxLU ) { // krow is in L : place it in structure of L(*, jj) panel_lsub(nextl_col++) = krow; // krow is indexed into A traits.mem_expand(panel_lsub, nextl_col, kmark); } else { // krow is in U : if its supernode-representative krep // has been explored, update repfnz(*) // krep = supernode representative of the current row Index krep = glu.xsup(glu.supno(kperm)+1) - 1; // First nonzero element in the current column: Index myfnz = repfnz_col(krep); if (myfnz != emptyIdxLU ) { // Representative visited before if (myfnz > kperm ) repfnz_col(krep) = kperm; } else { // Otherwise, perform dfs starting at krep Index oldrep = emptyIdxLU; parent(krep) = oldrep; repfnz_col(krep) = kperm; Index xdfs = glu.xlsub(krep); Index maxdfs = xprune(krep); Index kpar; do { // For each unmarked kchild of krep while (xdfs < maxdfs) { Index kchild = glu.lsub(xdfs); xdfs++; Index chmark = marker(kchild); if (chmark != jj ) { marker(kchild) = jj; Index chperm = perm_r(kchild); if (chperm == emptyIdxLU) { // case kchild is in L: place it in L(*, j) panel_lsub(nextl_col++) = kchild; traits.mem_expand(panel_lsub, nextl_col, chmark); } else { // case kchild is in U : // chrep = its supernode-rep. If its rep has been explored, // update its repfnz(*) Index chrep = glu.xsup(glu.supno(chperm)+1) - 1; myfnz = repfnz_col(chrep); if (myfnz != emptyIdxLU) { // Visited before if (myfnz > chperm) repfnz_col(chrep) = chperm; } else { // Cont. dfs at snode-rep of kchild xplore(krep) = xdfs; oldrep = krep; krep = chrep; // Go deeper down G(L) parent(krep) = oldrep; repfnz_col(krep) = chperm; xdfs = glu.xlsub(krep); maxdfs = xprune(krep); } // end if myfnz != -1 } // end if chperm == -1 } // end if chmark !=jj } // end while xdfs < maxdfs // krow has no more unexplored nbrs : // Place snode-rep krep in postorder DFS, if this // segment is seen for the first time. (Note that // "repfnz(krep)" may change later.) // Baktrack dfs to its parent if(traits.update_segrep(krep,jj)) //if (marker1(krep) < jcol ) { segrep(nseg) = krep; ++nseg; //marker1(krep) = jj; } kpar = parent(krep); // Pop recursion, mimic recursion if (kpar == emptyIdxLU) break; // dfs done krep = kpar; xdfs = xplore(krep); maxdfs = xprune(krep); } while (kpar != emptyIdxLU); // Do until empty stack } // end if (myfnz = -1) } // end if (kperm == -1) } /** * \brief Performs a symbolic factorization on a panel of columns [jcol, jcol+w) * * A supernode representative is the last column of a supernode. 
* The nonzeros in U[*,j] are segments that end at supernodes representatives * * The routine returns a list of the supernodal representatives * in topological order of the dfs that generates them. This list is * a superset of the topological order of each individual column within * the panel. * The location of the first nonzero in each supernodal segment * (supernodal entry location) is also returned. Each column has * a separate list for this purpose. * * Two markers arrays are used for dfs : * marker[i] == jj, if i was visited during dfs of current column jj; * marker1[i] >= jcol, if i was visited by earlier columns in this panel; * * \param[in] m number of rows in the matrix * \param[in] w Panel size * \param[in] jcol Starting column of the panel * \param[in] A Input matrix in column-major storage * \param[in] perm_r Row permutation * \param[out] nseg Number of U segments * \param[out] dense Accumulate the column vectors of the panel * \param[out] panel_lsub Subscripts of the row in the panel * \param[out] segrep Segment representative i.e first nonzero row of each segment * \param[out] repfnz First nonzero location in each row * \param[out] xprune The pruned elimination tree * \param[out] marker work vector * \param parent The elimination tree * \param xplore work vector * \param glu The global data structure * */ template void SparseLUImpl::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) { Index nextl_col; // Next available position in panel_lsub[*,jj] // Initialize pointers VectorBlock marker1(marker, m, m); nseg = 0; panel_dfs_traits traits(jcol, marker1.data()); // For each column in the panel for (Index jj = jcol; jj < jcol + w; jj++) { nextl_col = (jj - jcol) * m; VectorBlock repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row VectorBlock dense_col(dense,nextl_col, m); // Accumulate a column vector here // For each nnz in A[*, jj] do depth first search for (typename MatrixType::InnerIterator it(A, jj); it; ++it) { Index krow = it.row(); dense_col(krow) = it.value(); Index kmark = marker(krow); if (kmark == jj) continue; // krow visited before, go to the next nonzero dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent, xplore, glu, nextl_col, krow, traits); }// end for nonzeros in column jj } // end for column jj } } // end namespace internal } // end namespace Eigen #endif // SPARSELU_PANEL_DFS_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h0000644000175000017500000000702613341130426024442 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of [s,d,c,z]copy_to_ucol.c file in SuperLU * -- SuperLU routine (version 2.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * November 15, 1997 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. 
* * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_COPY_TO_UCOL_H #define SPARSELU_COPY_TO_UCOL_H namespace Eigen { namespace internal { /** * \brief Performs numeric block updates (sup-col) in topological order * * \param jcol current column to update * \param nseg Number of segments in the U part * \param segrep segment representative ... * \param repfnz First nonzero column in each row ... * \param perm_r Row permutation * \param dense Store the full representation of the column * \param glu Global LU data. * \return 0 - successful return * > 0 - number of bytes allocated when run out of space * */ template Index SparseLUImpl::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu) { Index ksub, krep, ksupno; Index jsupno = glu.supno(jcol); // For each nonzero supernode segment of U[*,j] in topological order Index k = nseg - 1, i; Index nextu = glu.xusub(jcol); Index kfnz, isub, segsize; Index new_next,irow; Index fsupc, mem; for (ksub = 0; ksub < nseg; ksub++) { krep = segrep(k); k--; ksupno = glu.supno(krep); if (jsupno != ksupno ) // should go into ucol(); { kfnz = repfnz(krep); if (kfnz != emptyIdxLU) { // Nonzero U-segment fsupc = glu.xsup(ksupno); isub = glu.xlsub(fsupc) + kfnz - fsupc; segsize = krep - kfnz + 1; new_next = nextu + segsize; while (new_next > glu.nzumax) { mem = memXpand(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions); if (mem) return mem; mem = memXpand(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions); if (mem) return mem; } for (i = 0; i < segsize; i++) { irow = glu.lsub(isub); glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order glu.ucol(nextu) = dense(irow); dense(irow) = Scalar(0.0); nextu++; isub++; } } // end nonzero U-segment } // end if jsupno } // end for each segment glu.xusub(jcol + 1) = nextu; // close U(*,jcol) return 0; } } // namespace internal } // end namespace Eigen #endif // SPARSELU_COPY_TO_UCOL_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU.h0000644000175000017500000006530013341130426021663 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_LU_H #define EIGEN_SPARSE_LU_H namespace Eigen { template > class SparseLU; template struct SparseLUMatrixLReturnType; template struct SparseLUMatrixUReturnType; /** \ingroup SparseLU_Module * \class SparseLU * * \brief Sparse supernodal LU factorization for general matrices * * This class implements the supernodal LU factorization for general matrices. * It uses the main techniques from the sequential SuperLU package * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real * and complex arithmetics with single and double precision, depending on the * scalar type of your input matrix. 
* The code has been optimized to provide BLAS-3 operations during supernode-panel updates. * It benefits directly from the built-in high-performant Eigen BLAS routines. * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to * enable a better optimization from the compiler. For best performance, * you should compile it with NDEBUG flag to avoid the numerous bounds checking on vectors. * * An important parameter of this class is the ordering method. It is used to reorder the columns * (and eventually the rows) of the matrix to reduce the number of new elements that are created during * numerical factorization. The cheapest method available is COLAMD. * See \link OrderingMethods_Module the OrderingMethods module \endlink for the list of * built-in and external ordering methods. * * Simple example with key steps * \code * VectorXd x(n), b(n); * SparseMatrix A; * SparseLU, COLAMDOrdering > solver; * // fill A and b; * // Compute the ordering permutation vector from the structural pattern of A * solver.analyzePattern(A); * // Compute the numerical factorization * solver.factorize(A); * //Use the factors to solve the linear system * x = solver.solve(b); * \endcode * * \warning The input matrix A should be in a \b compressed and \b column-major form. * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix. * * \note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix. * For badly scaled matrices, this step can be useful to reduce the pivoting during factorization. * If this is the case for your matrices, you can try the basic scaling method at * "unsupported/Eigen/src/IterativeSolvers/Scaling.h" * * \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<> * \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. Default is COLMAD * * * \sa \ref TutorialSparseDirectSolvers * \sa \ref OrderingMethods_Module */ template class SparseLU : public internal::SparseLUImpl { public: typedef _MatrixType MatrixType; typedef _OrderingType OrderingType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix NCMatrix; typedef internal::MappedSuperNodalMatrix SCMatrix; typedef Matrix ScalarVector; typedef Matrix IndexVector; typedef PermutationMatrix PermutationType; typedef internal::SparseLUImpl Base; public: SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1) { initperfvalues(); } SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1) { initperfvalues(); compute(matrix); } ~SparseLU() { // Free all explicit dynamic pointers } void analyzePattern (const MatrixType& matrix); void factorize (const MatrixType& matrix); void simplicialfactorize(const MatrixType& matrix); /** * Compute the symbolic and numeric factorization of the input sparse matrix. * The input matrix should be in column-major storage. 
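*
* A minimal usage sketch, following the class-level example above (A, b and x
* stand for a previously filled matrix, right-hand side and solution vector):
* \code
* SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > solver;
* solver.compute(A);  // analyzePattern(A) followed by factorize(A)
* if(solver.info() == Success)
*   x = solver.solve(b);
* \endcode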
*/ void compute (const MatrixType& matrix) { // Analyze analyzePattern(matrix); //Factorize factorize(matrix); } inline Index rows() const { return m_mat.rows(); } inline Index cols() const { return m_mat.cols(); } /** Indicate that the pattern of the input matrix is symmetric */ void isSymmetric(bool sym) { m_symmetricmode = sym; } /** \returns an expression of the matrix L, internally stored as supernodes * The only operation available with this expression is the triangular solve * \code * y = b; matrixL().solveInPlace(y); * \endcode */ SparseLUMatrixLReturnType matrixL() const { return SparseLUMatrixLReturnType(m_Lstore); } /** \returns an expression of the matrix U, * The only operation available with this expression is the triangular solve * \code * y = b; matrixU().solveInPlace(y); * \endcode */ SparseLUMatrixUReturnType > matrixU() const { return SparseLUMatrixUReturnType >(m_Lstore, m_Ustore); } /** * \returns a reference to the row matrix permutation \f$ P_r \f$ such that \f$P_r A P_c^T = L U\f$ * \sa colsPermutation() */ inline const PermutationType& rowsPermutation() const { return m_perm_r; } /** * \returns a reference to the column matrix permutation\f$ P_c^T \f$ such that \f$P_r A P_c^T = L U\f$ * \sa rowsPermutation() */ inline const PermutationType& colsPermutation() const { return m_perm_c; } /** Set the threshold used for a diagonal entry to be an acceptable pivot. */ void setPivotThreshold(const RealScalar& thresh) { m_diagpivotthresh = thresh; } /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. * * \warning the destination matrix X in X = this->solve(B) must be colmun-major. * * \sa compute() */ template inline const internal::solve_retval solve(const MatrixBase& B) const { eigen_assert(m_factorizationIsOk && "SparseLU is not initialized."); eigen_assert(rows()==B.rows() && "SparseLU::solve(): invalid number of rows of the right hand side matrix B"); return internal::solve_retval(*this, B.derived()); } /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. * * \sa compute() */ template inline const internal::sparse_solve_retval solve(const SparseMatrixBase& B) const { eigen_assert(m_factorizationIsOk && "SparseLU is not initialized."); eigen_assert(rows()==B.rows() && "SparseLU::solve(): invalid number of rows of the right hand side matrix B"); return internal::sparse_solve_retval(*this, B.derived()); } /** \brief Reports whether previous computation was successful. 
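*
* A typical check, as a sketch:
* \code
* solver.factorize(A);
* if(solver.info() != Success)
*   std::cerr << solver.lastErrorMessage() << std::endl;
* \endcode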
* * \returns \c Success if computation was successful, * \c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance * \c InvalidInput if the input matrix is invalid * * \sa iparm() */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** * \returns A string describing the type of error */ std::string lastErrorMessage() const { return m_lastError; } template<typename Rhs, typename Dest> bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const { Dest& X(X_base.derived()); eigen_assert(m_factorizationIsOk && "The matrix should be factorized first"); EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); // Permute the right hand side to form X = Pr*B // on return, X is overwritten by the computed solution X.resize(B.rows(),B.cols()); // this ugly const_cast_derived() helps to detect aliasing when applying the permutations for(Index j = 0; j < B.cols(); ++j) X.col(j) = rowsPermutation() * B.const_cast_derived().col(j); //Forward substitution with L this->matrixL().solveInPlace(X); this->matrixU().solveInPlace(X); // Permute back the solution for (Index j = 0; j < B.cols(); ++j) X.col(j) = colsPermutation().inverse() * X.col(j); return true; } /** * \returns the absolute value of the determinant of the matrix of which * *this is the LU decomposition. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. * One way to work around that is to use logAbsDeterminant() instead. * * \sa logAbsDeterminant(), signDeterminant() */ Scalar absDeterminant() { eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); // Initialize with the determinant of the row matrix Scalar det = Scalar(1.); // Note that the diagonal blocks of U are stored in supernodes, // which are available in the L part :) for (Index j = 0; j < this->cols(); ++j) { for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) { if(it.index() == j) { det *= (std::abs)(it.value()); break; } } } return det; } /** \returns the natural log of the absolute value of the determinant of the matrix * of which *this is the LU decomposition * * \note This method is useful to work around the risk of overflow/underflow that's * inherent to the determinant computation. 
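*
* Since \f$ P_r A P_c^T = L U \f$ with a unit-diagonal L, the returned value
* is \f$ \sum_i \log |U_{ii}| \f$, accumulated from the diagonal blocks of U
* that are stored within the supernodes of the L part.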
* * \sa absDeterminant(), signDeterminant() */ Scalar logAbsDeterminant() const { eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); Scalar det = Scalar(0.); for (Index j = 0; j < this->cols(); ++j) { for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) { if(it.row() < j) continue; if(it.row() == j) { det += (std::log)((std::abs)(it.value())); break; } } } return det; } /** \returns A number representing the sign of the determinant * * \sa absDeterminant(), logAbsDeterminant() */ Scalar signDeterminant() { eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); return Scalar(m_detPermR); } protected: // Functions void initperfvalues() { m_perfv.panel_size = 1; m_perfv.relax = 1; m_perfv.maxsuper = 128; m_perfv.rowblk = 16; m_perfv.colblk = 8; m_perfv.fillfactor = 20; } // Variables mutable ComputationInfo m_info; bool m_isInitialized; bool m_factorizationIsOk; bool m_analysisIsOk; std::string m_lastError; NCMatrix m_mat; // The input (permuted ) matrix SCMatrix m_Lstore; // The lower triangular matrix (supernodal) MappedSparseMatrix m_Ustore; // The upper triangular matrix PermutationType m_perm_c; // Column permutation PermutationType m_perm_r ; // Row permutation IndexVector m_etree; // Column elimination tree typename Base::GlobalLU_t m_glu; // SparseLU options bool m_symmetricmode; // values for performance internal::perfvalues m_perfv; RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot Index m_nnzL, m_nnzU; // Nonzeros in L and U factors Index m_detPermR; // Determinant of the coefficient matrix private: // Disable copy constructor SparseLU (const SparseLU& ); }; // End class SparseLU // Functions needed by the anaysis phase /** * Compute the column permutation to minimize the fill-in * * - Apply this permutation to the input matrix - * * - Compute the column elimination tree on the permuted matrix * * - Postorder the elimination tree and the column permutation * */ template void SparseLU::analyzePattern(const MatrixType& mat) { //TODO It is possible as in SuperLU to compute row and columns scaling vectors to equilibrate the matrix mat. OrderingType ord; ord(mat,m_perm_c); // Apply the permutation to the column of the input matrix //First copy the whole input matrix. m_mat = mat; if (m_perm_c.size()) { m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used. 
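/* in uncompressed mode every column is described by its own (outerIndex,
   innerNonZero) pair, so the loop below applies the column permutation by
   rewriting those pointers only, without moving the inner index or value
   arrays */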
//Then, permute only the column pointers const Index * outerIndexPtr; if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr(); else { Index *outerIndexPtr_t = new Index[mat.cols()+1]; for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i]; outerIndexPtr = outerIndexPtr_t; } for (Index i = 0; i < mat.cols(); i++) { m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i]; m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i]; } if(!mat.isCompressed()) delete[] outerIndexPtr; } // Compute the column elimination tree of the permuted matrix IndexVector firstRowElt; internal::coletree(m_mat, m_etree,firstRowElt); // In symmetric mode, do not do postorder here if (!m_symmetricmode) { IndexVector post, iwork; // Post order etree internal::treePostorder(m_mat.cols(), m_etree, post); // Renumber etree in postorder Index m = m_mat.cols(); iwork.resize(m+1); for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i)); m_etree = iwork; // Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree PermutationType post_perm(m); for (Index i = 0; i < m; i++) post_perm.indices()(i) = post(i); // Combine the two permutations : postorder the permutation for future use if(m_perm_c.size()) { m_perm_c = post_perm * m_perm_c; } } // end postordering m_analysisIsOk = true; } // Functions needed by the numerical factorization phase /** * - Numerical factorization * - Interleaved with the symbolic factorization * On exit, info is * * = 0: successful factorization * * > 0: if info = i, and i is * * <= A->ncol: U(i,i) is exactly zero. The factorization has * been completed, but the factor U is exactly singular, * and division by zero will occur if it is used to solve a * system of equations. * * > A->ncol: number of bytes allocated when memory allocation * failure occurred, plus A->ncol. If lwork = -1, it is * the estimated amount of space needed, plus A->ncol. */ template void SparseLU::factorize(const MatrixType& matrix) { using internal::emptyIdxLU; eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices"); typedef typename IndexVector::Scalar Index; // Apply the column permutation computed in analyzepattern() // m_mat = matrix * m_perm_c.inverse(); m_mat = matrix; if (m_perm_c.size()) { m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. 
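/* same pointer-rewriting trick as in analyzePattern(): only outerIndexPtr
   and innerNonZeroPtr are updated to realize m_mat = matrix * m_perm_c.inverse() */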
//Then, permute only the column pointers const Index * outerIndexPtr; if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr(); else { Index* outerIndexPtr_t = new Index[matrix.cols()+1]; for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i]; outerIndexPtr = outerIndexPtr_t; } for (Index i = 0; i < matrix.cols(); i++) { m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i]; m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i]; } if(!matrix.isCompressed()) delete[] outerIndexPtr; } else { //FIXME This should not be needed if the empty permutation is handled transparently m_perm_c.resize(matrix.cols()); for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i; } Index m = m_mat.rows(); Index n = m_mat.cols(); Index nnz = m_mat.nonZeros(); Index maxpanel = m_perfv.panel_size * m; // Allocate working storage common to the factor routines Index lwork = 0; Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu); if (info) { m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ; m_factorizationIsOk = false; return ; } // Set up pointers for integer working arrays IndexVector segrep(m); segrep.setZero(); IndexVector parent(m); parent.setZero(); IndexVector xplore(m); xplore.setZero(); IndexVector repfnz(maxpanel); IndexVector panel_lsub(maxpanel); IndexVector xprune(n); xprune.setZero(); IndexVector marker(m*internal::LUNoMarker); marker.setZero(); repfnz.setConstant(-1); panel_lsub.setConstant(-1); // Set up pointers for scalar working arrays ScalarVector dense; dense.setZero(maxpanel); ScalarVector tempv; tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) ); // Compute the inverse of perm_c PermutationType iperm_c(m_perm_c.inverse()); // Identify initial relaxed snodes IndexVector relax_end(n); if ( m_symmetricmode == true ) Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end); else Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end); m_perm_r.resize(m); m_perm_r.indices().setConstant(-1); marker.setConstant(-1); m_detPermR = 1; // Record the determinant of the row permutation m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0); m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0); // Work on one 'panel' at a time. A panel is one of the following : // (a) a relaxed supernode at the bottom of the etree, or // (b) panel_size contiguous columns, defined by the user Index jcol; IndexVector panel_histo(n); Index pivrow; // Pivotal row number in the original row matrix Index nseg1; // Number of segments in U-column above panel row jcol Index nseg; // Number of segments in each U-column Index irep; Index i, k, jj; for (jcol = 0; jcol < n; ) { // Adjust panel size so that a panel won't overlap with the next relaxed snode. 
Index panel_size = m_perfv.panel_size; // upper bound on panel width for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++) { if (relax_end(k) != emptyIdxLU) { panel_size = k - jcol; break; } } if (k == n) panel_size = n - jcol; // Symbolic outer factorization on a panel of columns Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu); // Numeric sup-panel updates in topological order Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu); // Sparse LU within the panel, and below the panel diagonal for ( jj = jcol; jj< jcol + panel_size; jj++) { k = (jj - jcol) * m; // Column index for w-wide arrays nseg = nseg1; // begin after all the panel segments //Depth-first-search for the current column VectorBlock panel_lsubk(panel_lsub, k, m); VectorBlock repfnz_k(repfnz, k, m); info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu); if ( info ) { m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() "; m_info = NumericalIssue; m_factorizationIsOk = false; return; } // Numeric updates to this column VectorBlock dense_k(dense, k, m); VectorBlock segrep_k(segrep, nseg1, m-nseg1); info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu); if ( info ) { m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() "; m_info = NumericalIssue; m_factorizationIsOk = false; return; } // Copy the U-segments to ucol(*) info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu); if ( info ) { m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() "; m_info = NumericalIssue; m_factorizationIsOk = false; return; } // Form the L-segment info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu); if ( info ) { m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... 
ZERO COLUMN AT "; std::ostringstream returnInfo; returnInfo << info; m_lastError += returnInfo.str(); m_info = NumericalIssue; m_factorizationIsOk = false; return; } // Update the determinant of the row permutation matrix if (pivrow != jj) m_detPermR *= -1; // Prune columns (0:jj-1) using column jj Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu); // Reset repfnz for this column for (i = 0; i < nseg; i++) { irep = segrep(i); repfnz_k(irep) = emptyIdxLU; } } // end SparseLU within the panel jcol += panel_size; // Move to the next panel } // end for -- end elimination // Count the number of nonzeros in factors Base::countnz(n, m_nnzL, m_nnzU, m_glu); // Apply permutation to the L subscripts Base::fixupL(n, m_perm_r.indices(), m_glu); // Create supernode matrix L m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); // Create the column major upper sparse matrix U; new (&m_Ustore) MappedSparseMatrix ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); m_info = Success; m_factorizationIsOk = true; } template struct SparseLUMatrixLReturnType : internal::no_assignment_operator { typedef typename MappedSupernodalType::Index Index; typedef typename MappedSupernodalType::Scalar Scalar; SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL) { } Index rows() { return m_mapL.rows(); } Index cols() { return m_mapL.cols(); } template void solveInPlace( MatrixBase &X) const { m_mapL.solveInPlace(X); } const MappedSupernodalType& m_mapL; }; template struct SparseLUMatrixUReturnType : internal::no_assignment_operator { typedef typename MatrixLType::Index Index; typedef typename MatrixLType::Scalar Scalar; SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU) : m_mapL(mapL),m_mapU(mapU) { } Index rows() { return m_mapL.rows(); } Index cols() { return m_mapL.cols(); } template void solveInPlace(MatrixBase &X) const { Index nrhs = X.cols(); Index n = X.rows(); // Backward solve with U for (Index k = m_mapL.nsuper(); k >= 0; k--) { Index fsupc = m_mapL.supToCol()[k]; Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension Index nsupc = m_mapL.supToCol()[k+1] - fsupc; Index luptr = m_mapL.colIndexPtr()[fsupc]; if (nsupc == 1) { for (Index j = 0; j < nrhs; j++) { X(fsupc, j) /= m_mapL.valuePtr()[luptr]; } } else { Map, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); U = A.template triangularView().solve(U); } for (Index j = 0; j < nrhs; ++j) { for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++) { typename MatrixUType::InnerIterator it(m_mapU, jcol); for ( ; it; ++it) { Index irow = it.index(); X(irow, j) -= X(jcol, j) * it.value(); } } } } // End For U-solve } const MatrixLType& m_mapL; const MatrixUType& m_mapU; }; namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef SparseLU<_MatrixType,Derived> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; template struct sparse_solve_retval, Rhs> : sparse_solve_retval_base, Rhs> { typedef SparseLU<_MatrixType,Derived> Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { this->defaultEvalTo(dst); } }; } // end namespace internal } // End namespace Eigen #endif 
antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h0000644000175000017500000001506413341130426024243 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // Copyright (C) 2012 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU * -- SuperLU routine (version 3.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * October 15, 2003 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_COLUMN_BMOD_H #define SPARSELU_COLUMN_BMOD_H namespace Eigen { namespace internal { /** * \brief Performs numeric block updates (sup-col) in topological order * * \param jcol current column to update * \param nseg Number of segments in the U part * \param dense Store the full representation of the column * \param tempv working array * \param segrep segment representative ... * \param repfnz ??? First nonzero column in each row ??? ... * \param fpanelc First column in the current panel * \param glu Global LU data. 
* \return 0 - successful return * > 0 - number of bytes allocated when run out of space * */ template Index SparseLUImpl::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu) { Index jsupno, k, ksub, krep, ksupno; Index lptr, nrow, isub, irow, nextlu, new_next, ufirst; Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros; /* krep = representative of current k-th supernode * fsupc = first supernodal column * nsupc = number of columns in a supernode * nsupr = number of rows in a supernode * luptr = location of supernodal LU-block in storage * kfnz = first nonz in the k-th supernodal segment * no_zeros = no lf leading zeros in a supernodal U-segment */ jsupno = glu.supno(jcol); // For each nonzero supernode segment of U[*,j] in topological order k = nseg - 1; Index d_fsupc; // distance between the first column of the current panel and the // first column of the current snode Index fst_col; // First column within small LU update Index segsize; for (ksub = 0; ksub < nseg; ksub++) { krep = segrep(k); k--; ksupno = glu.supno(krep); if (jsupno != ksupno ) { // outside the rectangular supernode fsupc = glu.xsup(ksupno); fst_col = (std::max)(fsupc, fpanelc); // Distance from the current supernode to the current panel; // d_fsupc = 0 if fsupc > fpanelc d_fsupc = fst_col - fsupc; luptr = glu.xlusup(fst_col) + d_fsupc; lptr = glu.xlsub(fsupc) + d_fsupc; kfnz = repfnz(krep); kfnz = (std::max)(kfnz, fpanelc); segsize = krep - kfnz + 1; nsupc = krep - fst_col + 1; nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); nrow = nsupr - d_fsupc - nsupc; Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col); // Perform a triangular solver and block update, // then scatter the result of sup-col update to dense no_zeros = kfnz - fst_col; if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); else LU_kernel_bmod::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); } // end if jsupno } // end for each segment // Process the supernodal portion of L\U[*,j] nextlu = glu.xlusup(jcol); fsupc = glu.xsup(jsupno); // copy the SPA dense into L\U[*,j] Index mem; new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc); Index offset = internal::first_multiple(new_next, internal::packet_traits::size) - new_next; if(offset) new_next += offset; while (new_next > glu.nzlumax ) { mem = memXpand(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions); if (mem) return mem; } for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++) { irow = glu.lsub(isub); glu.lusup(nextlu) = dense(irow); dense(irow) = Scalar(0.0); ++nextlu; } if(offset) { glu.lusup.segment(nextlu,offset).setZero(); nextlu += offset; } glu.xlusup(jcol + 1) = nextlu; // close L\U(*,jcol); /* For more updates within the panel (also within the current supernode), * should start from the first column of the panel, or the first column * of the supernode, whichever is bigger. 
There are two cases: * 1) fsupc < fpanelc, then fst_col <-- fpanelc * 2) fsupc >= fpanelc, then fst_col <-- fsupc */ fst_col = (std::max)(fsupc, fpanelc); if (fst_col < jcol) { // Distance between the current supernode and the current panel // d_fsupc = 0 if fsupc >= fpanelc d_fsupc = fst_col - fsupc; lptr = glu.xlsub(fsupc) + d_fsupc; luptr = glu.xlusup(fst_col) + d_fsupc; nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension nsupc = jcol - fst_col; // excluding jcol nrow = nsupr - d_fsupc - nsupc; // points to the beginning of jcol in snode L\U(jsupno) ufirst = glu.xlusup(jcol) + d_fsupc; Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol); Map, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); VectorBlock u(glu.lusup, ufirst, nsupc); u = A.template triangularView().solve(u); new (&A) Map, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); VectorBlock l(glu.lusup, ufirst+nsupc, nrow); l.noalias() -= A * u; } // End if fst_col return 0; } } // end namespace internal } // end namespace Eigen #endif // SPARSELU_COLUMN_BMOD_H antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h0000644000175000017500000000544413341130426024251 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* This file is a modified version of heap_relax_snode.c file in SuperLU * -- SuperLU routine (version 3.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * October 15, 2003 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_RELAX_SNODE_H #define SPARSELU_RELAX_SNODE_H namespace Eigen { namespace internal { /** * \brief Identify the initial relaxed supernodes * * This routine is applied to a column elimination tree. 
* It assumes that the matrix has been reordered according to the postorder of the etree * \param n the number of columns * \param et elimination tree * \param relax_columns Maximum number of columns allowed in a relaxed snode * \param descendants Number of descendants of each node in the etree * \param relax_end last column in a supernode */ template void SparseLUImpl::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) { // compute the number of descendants of each node in the etree Index j, parent; relax_end.setConstant(emptyIdxLU); descendants.setZero(); for (j = 0; j < n; j++) { parent = et(j); if (parent != n) // not the dummy root descendants(parent) += descendants(j) + 1; } // Identify the relaxed supernodes by postorder traversal of the etree Index snode_start; // beginning of a snode for (j = 0; j < n; ) { parent = et(j); snode_start = j; while ( parent != n && descendants(parent) < relax_columns ) { j = parent; parent = et(j); } // Found a supernode in postordered etree, j is the last column relax_end(snode_start) = j; // Record last column j++; // Search for a new leaf while (descendants(j) != 0 && j < n) j++; } // End postorder traversal of the etree } } // end namespace internal } // end namespace Eigen #endif antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_Structs.h0000644000175000017500000001160113341130426023405 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h * -- SuperLU routine (version 4.1) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * November, 2010 * * Global data structures used in LU factorization - * * nsuper: #supernodes = nsuper + 1, numbered [0, nsuper]. * (xsup,supno): supno[i] is the supernode no to which i belongs; * xsup(s) points to the beginning of the s-th supernode. * e.g. supno 0 1 2 2 3 3 3 4 4 4 4 4 (n=12) * xsup 0 1 2 4 7 12 * Note: dfs will be performed on supernode rep. relative to the new * row pivoting ordering * * (xlsub,lsub): lsub[*] contains the compressed subscript of * rectangular supernodes; xlsub[j] points to the starting * location of the j-th column in lsub[*]. Note that xlsub * is indexed by column. * Storage: original row subscripts * * During the course of sparse LU factorization, we also use * (xlsub,lsub) for the purpose of symmetric pruning. For each * supernode {s,s+1,...,t=s+r} with first column s and last * column t, the subscript set * lsub[j], j=xlsub[s], .., xlsub[s+1]-1 * is the structure of column s (i.e. structure of this supernode). * It is used for the storage of numerical values. * Furthermore, * lsub[j], j=xlsub[t], .., xlsub[t+1]-1 * is the structure of the last column t of this supernode. * It is for the purpose of symmetric pruning. Therefore, the * structural subscripts can be rearranged without making physical * interchanges among the numerical values. * * However, if the supernode has only one column, then we * only keep one set of subscripts. For any subscript interchange * performed, similar interchange must be done on the numerical * values. 
 *
 *	The last column structures (for pruning) will be removed
 *	after the numerical LU factorization phase.
 *
 * 	(xlusup,lusup): lusup[*] contains the numerical values of the
 *	rectangular supernodes; xlusup[j] points to the starting
 *	location of the j-th column in storage vector lusup[*]
 *	Note: xlusup is indexed by column.
 *	Each rectangular supernode is stored by column-major
 *	scheme, consistent with Fortran 2-dim array storage.
 *
 * 	(xusub,ucol,usub): ucol[*] stores the numerical values of
 *	U-columns outside the rectangular supernodes. The row
 *	subscript of nonzero ucol[k] is stored in usub[k].
 *	xusub[i] points to the starting location of column i in ucol.
 *	Storage: new row subscripts; that is subscripts of PA.
 */

#ifndef EIGEN_LU_STRUCTS
#define EIGEN_LU_STRUCTS
namespace Eigen {
namespace internal {

typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;

template <typename IndexVector, typename ScalarVector>
struct LU_GlobalLU_t {
  typedef typename IndexVector::Scalar Index;
  IndexVector xsup;   // First supernode column ... xsup(s) points to the beginning of the s-th supernode
  IndexVector supno;  // Supernode number corresponding to this column (column to supernode mapping)
  ScalarVector lusup; // nonzero values of L ordered by columns
  IndexVector lsub;   // Compressed row indices of L rectangular supernodes.
  IndexVector xlusup; // pointers to the beginning of each column in lusup
  IndexVector xlsub;  // pointers to the beginning of each column in lsub
  Index nzlmax;       // Current max size of lsub
  Index nzlumax;      // Current max size of lusup
  ScalarVector ucol;  // nonzero values of U ordered by columns
  IndexVector usub;   // row indices of U columns in ucol
  IndexVector xusub;  // Pointers to the beginning of each column of U in ucol
  Index nzumax;       // Current max size of ucol
  Index n;            // Number of columns in the matrix
  Index num_expansions;
};

// Values to set for performance
template <typename Index>
struct perfvalues {
  Index panel_size;  // a panel consists of at most <panel_size> consecutive columns
  Index relax;       // To control degree of relaxing supernodes. If the number of nodes (columns)
                     // in a subtree of the elimination tree is less than relax, this subtree is considered
                     // as one supernode regardless of the row structures of those columns
  Index maxsuper;    // The maximum size for a supernode in complete LU
  Index rowblk;      // The minimum row dimension for 2-D blocking to be used
  Index colblk;      // The minimum column dimension for 2-D blocking to be used
  Index fillfactor;  // The estimated fill factor for L and U, compared with A
};

} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LU_STRUCTS
antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLUImpl.h0000644000175000017500000001004513341130426022501 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
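// Editor's note (not part of the Eigen sources): a minimal sketch of the
// column-to-supernode mapping documented in SparseLU_Structs.h above, using
// the example values from that comment (n = 12, supernodes 0..4). The
// stand-alone check and its name are illustrative assumptions only.
//
//   #include <vector>
//   #include <cassert>
//
//   void supernode_mapping_sketch()  // hypothetical name
//   {
//     // xsup(s) is the first column of supernode s; supno[j] is the
//     // supernode containing column j.
//     std::vector<int> xsup  = {0, 1, 2, 4, 7, 12};
//     std::vector<int> supno = {0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4};
//     for (int s = 0; s + 1 < (int)xsup.size(); ++s)
//       for (int j = xsup[s]; j < xsup[s + 1]; ++j)
//         assert(supno[j] == s);  // columns [xsup(s), xsup(s+1)) form supernode s
//   }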
#ifndef SPARSELU_IMPL_H #define SPARSELU_IMPL_H namespace Eigen { namespace internal { /** \ingroup SparseLU_Module * \class SparseLUImpl * Base class for sparseLU */ template class SparseLUImpl { public: typedef Matrix ScalarVector; typedef Matrix IndexVector; typedef typename ScalarVector::RealScalar RealScalar; typedef Ref > BlockScalarVector; typedef Ref > BlockIndexVector; typedef LU_GlobalLU_t GlobalLU_t; typedef SparseMatrix MatrixType; protected: template Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions); Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu); template Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions); void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu); Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu); Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu); template void dfs_kernel(const Index jj, IndexVector& perm_r, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits); void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu); void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu); Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu); Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu); Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu); void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu); void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu); void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu); template friend struct column_dfs_traits; }; } // end namespace internal } // namespace Eigen #endif antimony/lib/fab/vendor/Eigen/src/SparseLU/SparseLU_Memory.h0000644000175000017500000001665013341130426023217 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of [s,d,c,z]memory.c files in SuperLU * -- SuperLU routine (version 3.1) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * August 1, 2008 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef EIGEN_SPARSELU_MEMORY #define EIGEN_SPARSELU_MEMORY namespace Eigen { namespace internal { enum { LUNoMarker = 3 }; enum {emptyIdxLU = -1}; template inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b) { return (std::max)(m, (t+b)*w); } template< typename Scalar, typename Index> inline Index LUTempSpace(Index&m, Index& w) { return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar); } /** * Expand the existing storage to accomodate more fill-ins * \param vec Valid pointer to the vector to allocate or expand * \param[in,out] length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector * \param[in] nbElts Current number of elements in the factors * \param keep_prev 1: use length and do not expand the vector; 0: compute new_len and expand * \param[in,out] num_expansions Number of times the memory has been expanded */ template template Index SparseLUImpl::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions) { float alpha = 1.5; // Ratio of the memory increase Index new_len; // New size of the allocated memory if(num_expansions == 0 || keep_prev) new_len = length ; // First time allocate requested else new_len = (std::max)(length+1,Index(alpha * length)); VectorType old_vec; // Temporary vector to hold the previous values if (nbElts > 0 ) old_vec = vec.segment(0,nbElts); //Allocate or expand the current vector #ifdef EIGEN_EXCEPTIONS try #endif { vec.resize(new_len); } #ifdef EIGEN_EXCEPTIONS catch(std::bad_alloc& ) #else if(!vec.size()) #endif { if (!num_expansions) { // First time to allocate from LUMemInit() // Let LUMemInit() deals with it. return -1; } if (keep_prev) { // In this case, the memory length should not not be reduced return new_len; } else { // Reduce the size and increase again Index tries = 0; // Number of attempts do { alpha = (alpha + 1)/2; new_len = (std::max)(length+1,Index(alpha * length)); #ifdef EIGEN_EXCEPTIONS try #endif { vec.resize(new_len); } #ifdef EIGEN_EXCEPTIONS catch(std::bad_alloc& ) #else if (!vec.size()) #endif { tries += 1; if ( tries > 10) return new_len; } } while (!vec.size()); } } //Copy the previous values to the newly allocated space if (nbElts > 0) vec.segment(0, nbElts) = old_vec; length = new_len; if(num_expansions) ++num_expansions; return 0; } /** * \brief Allocate various working space for the numerical factorization phase. 
* \param m number of rows of the input matrix * \param n number of columns * \param annz number of initial nonzeros in the matrix * \param lwork if lwork=-1, this routine returns an estimated size of the required memory * \param glu persistent data to facilitate multiple factors : will be deleted later ?? * \param fillratio estimated ratio of fill in the factors * \param panel_size Size of a panel * \return an estimated size of the required memory if lwork = -1; otherwise, return the size of actually allocated memory when allocation failed, and 0 on success * \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation */ template Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu) { Index& num_expansions = glu.num_expansions; //No memory expansions so far num_expansions = 0; glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor // Return the estimated size to the user if necessary Index tempSpace; tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar); if (lwork == emptyIdxLU) { Index estimated_size; estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace + (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n; return estimated_size; } // Setup the required space // First allocate Integer pointers for L\U factors glu.xsup.resize(n+1); glu.supno.resize(n+1); glu.xlsub.resize(n+1); glu.xlusup.resize(n+1); glu.xusub.resize(n+1); // Reserve memory for L/U factors do { if( (expand(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0) || (expand(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0) || (expand (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0) || (expand (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) ) { //Reduce the estimated size and retry glu.nzlumax /= 2; glu.nzumax /= 2; glu.nzlmax /= 2; if (glu.nzlumax < annz ) return glu.nzlumax; } } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size()); ++num_expansions; return 0; } // end LuMemInit /** * \brief Expand the existing storage * \param vec vector to expand * \param[in,out] maxlen On input, previous size of vec (Number of elements to copy ). on output, new size * \param nbElts current number of elements in the vector. 
* \param memtype Type of the element to expand * \param num_expansions Number of expansions * \return 0 on success, > 0 size of the memory allocated so far */ template template Index SparseLUImpl::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions) { Index failed_size; if (memtype == USUB) failed_size = this->expand(vec, maxlen, nbElts, 1, num_expansions); else failed_size = this->expand(vec, maxlen, nbElts, 0, num_expansions); if (failed_size) return failed_size; return 0 ; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSELU_MEMORY antimony/lib/fab/vendor/Eigen/src/SVD/0000755000175000017500000000000013341130426016746 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/SVD/CMakeLists.txt0000644000175000017500000000021213341130426021501 0ustar tiagotiagoFILE(GLOB Eigen_SVD_SRCS "*.h") INSTALL(FILES ${Eigen_SVD_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SVD COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/SVD/JacobiSVD.h0000644000175000017500000011466113341130426020674 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_JACOBISVD_H #define EIGEN_JACOBISVD_H namespace Eigen { namespace internal { // forward declaration (needed by ICC) // the empty body is required by MSVC template::IsComplex> struct svd_precondition_2x2_block_to_be_real {}; /*** QR preconditioners (R-SVD) *** *** Their role is to reduce the problem of computing the SVD to the case of a square matrix. *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for *** JacobiSVD which by itself is only able to work on square matrices. 
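 ***
 *** (Editor's clarification, not in the original comment: concretely, for a tall
 *** n-by-p matrix A the preconditioner computes a QR factorization A = Q R and the
 *** Jacobi iterations then run on the square p-by-p factor R; the SVD of R,
 *** R = U_R S V^*, recombines into A = (Q U_R) S V^*, up to the column permutation
 *** introduced by the pivoting QR variants.)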
***/ enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; template struct qr_preconditioner_should_do_anything { enum { a = MatrixType::RowsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime, b = MatrixType::RowsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime != Dynamic && MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime, ret = !( (QRPreconditioner == NoQRPreconditioner) || (Case == PreconditionIfMoreColsThanRows && bool(a)) || (Case == PreconditionIfMoreRowsThanCols && bool(b)) ) }; }; template::ret > struct qr_preconditioner_impl {}; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; void allocate(const JacobiSVD&) {} bool run(JacobiSVD&, const MatrixType&) { return false; } }; /*** preconditioner using FullPivHouseholderQR ***/ template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime }; typedef Matrix WorkspaceType; void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } if (svd.m_computeFullU) m_workspace.resize(svd.rows()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } private: typedef FullPivHouseholderQR QRType; QRType m_qr; WorkspaceType m_workspace; }; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Options = MatrixType::Options }; typedef Matrix TransposeTypeWithSameStorageOrder; void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } m_adjoint.resize(svd.cols(), svd.rows()); if (svd.m_computeFullV) m_workspace.resize(svd.cols()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } private: typedef FullPivHouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using ColPivHouseholderQR ***/ template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } if (svd.m_computeFullU) m_workspace.resize(svd.rows()); else if (svd.m_computeThinU) 
m_workspace.resize(svd.cols()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); else if(svd.m_computeThinU) { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); } if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } private: typedef ColPivHouseholderQR QRType; QRType m_qr; typename internal::plain_col_type::type m_workspace; }; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Options = MatrixType::Options }; typedef Matrix TransposeTypeWithSameStorageOrder; void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } if (svd.m_computeFullV) m_workspace.resize(svd.cols()); else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); m_adjoint.resize(svd.cols(), svd.rows()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); else if(svd.m_computeThinV) { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); } if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } private: typedef ColPivHouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using HouseholderQR ***/ template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } if (svd.m_computeFullU) m_workspace.resize(svd.rows()); else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); else if(svd.m_computeThinU) { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); } if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); return true; } return false; } private: typedef HouseholderQR QRType; QRType m_qr; typename internal::plain_col_type::type m_workspace; }; template class qr_preconditioner_impl { public: typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = 
MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Options = MatrixType::Options }; typedef Matrix TransposeTypeWithSameStorageOrder; void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } if (svd.m_computeFullV) m_workspace.resize(svd.cols()); else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); m_adjoint.resize(svd.cols(), svd.rows()); } bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); else if(svd.m_computeThinV) { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); } if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); return true; } else return false; } private: typedef HouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; typename internal::plain_row_type::type m_workspace; }; /*** 2x2 SVD implementation *** *** JacobiSVD consists in performing a series of 2x2 SVD subproblems ***/ template struct svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; typedef typename SVD::Index Index; static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} }; template struct svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename SVD::Index Index; static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) { using std::sqrt; Scalar z; JacobiRotation rot; RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); if(n==0) { z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); work_matrix.row(p) *= z; if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); if(work_matrix.coeff(q,q)!=Scalar(0)) { z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); work_matrix.row(q) *= z; if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); } // otherwise the second row is already zero, so we have nothing to do. } else { rot.c() = conj(work_matrix.coeff(p,p)) / n; rot.s() = work_matrix.coeff(q,p) / n; work_matrix.applyOnTheLeft(p,q,rot); if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); if(work_matrix.coeff(p,q) != Scalar(0)) { Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); work_matrix.col(q) *= z; if(svd.computeV()) svd.m_matrixV.col(q) *= z; } if(work_matrix.coeff(q,q) != Scalar(0)) { z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); work_matrix.row(q) *= z; if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); } } } }; template void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, JacobiRotation *j_left, JacobiRotation *j_right) { using std::sqrt; using std::abs; Matrix m; m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)), numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q)); JacobiRotation rot1; RealScalar t = m.coeff(0,0) + m.coeff(1,1); RealScalar d = m.coeff(1,0) - m.coeff(0,1); if(t == RealScalar(0)) { rot1.c() = RealScalar(0); rot1.s() = d > RealScalar(0) ? 
RealScalar(1) : RealScalar(-1); } else { RealScalar t2d2 = numext::hypot(t,d); rot1.c() = abs(t)/t2d2; rot1.s() = d/t2d2; if(tmakeJacobi(m,0,1); *j_left = rot1 * j_right->transpose(); } } // end namespace internal /** \ingroup SVD_Module * * * \class JacobiSVD * * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix * * \param MatrixType the type of the matrix of which we are computing the SVD decomposition * \param QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally * for the R-SVD step for non-square matrices. See discussion of possible values below. * * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product * \f[ A = U S V^* \f] * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left * and right \em singular \em vectors of \a A respectively. * * Singular values are always sorted in decreasing order. * * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly. * * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. * * Here's an example demonstrating basic usage: * \include JacobiSVD_basic.cpp * Output: \verbinclude JacobiSVD_basic.out * * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension. * * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to * terminate in finite (and reasonable) time. * * The possible values for QRPreconditioner are: * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. * Contrary to other QRs, it doesn't allow computing thin unitaries. * \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR. * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive * process is more reliable than the optimized bidiagonal SVD iterations. 
* \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking * if QR preconditioning is needed before applying it anyway. * * \sa MatrixBase::jacobiSvd() */ template class JacobiSVD { public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), MatrixOptions = MatrixType::Options }; typedef Matrix MatrixUType; typedef Matrix MatrixVType; typedef typename internal::plain_diag_type::type SingularValuesType; typedef typename internal::plain_row_type::type RowType; typedef typename internal::plain_col_type::type ColType; typedef Matrix WorkMatrixType; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via JacobiSVD::compute(const MatrixType&). */ JacobiSVD() : m_isInitialized(false), m_isAllocated(false), m_usePrescribedThreshold(false), m_computationOptions(0), m_rows(-1), m_cols(-1), m_diagSize(0) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem size. * \sa JacobiSVD() */ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) : m_isInitialized(false), m_isAllocated(false), m_usePrescribedThreshold(false), m_computationOptions(0), m_rows(-1), m_cols(-1) { allocate(rows, cols, computationOptions); } /** \brief Constructor performing the decomposition of given matrix. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non-default) FullPivHouseholderQR preconditioner. */ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) : m_isInitialized(false), m_isAllocated(false), m_usePrescribedThreshold(false), m_computationOptions(0), m_rows(-1), m_cols(-1) { compute(matrix, computationOptions); } /** \brief Method performing the decomposition of given matrix using custom options. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, * #ComputeFullV, #ComputeThinV. * * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). 
They also are not * available with the (non-default) FullPivHouseholderQR preconditioner. */ JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions); /** \brief Method performing the decomposition of given matrix using current options. * * \param matrix the matrix to decompose * * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). */ JacobiSVD& compute(const MatrixType& matrix) { return compute(matrix, m_computationOptions); } /** \returns the \a U matrix. * * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU. * * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. * * This method asserts that you asked for \a U to be computed. */ const MatrixUType& matrixU() const { eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?"); return m_matrixU; } /** \returns the \a V matrix. * * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV. * * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. * * This method asserts that you asked for \a V to be computed. */ const MatrixVType& matrixV() const { eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?"); return m_matrixV; } /** \returns the vector of singular values. * * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the * returned vector has size \a m. Singular values are always sorted in decreasing order. */ const SingularValuesType& singularValues() const { eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); return m_singularValues; } /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ inline bool computeU() const { return m_computeFullU || m_computeThinU; } /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ inline bool computeV() const { return m_computeFullV || m_computeThinV; } /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. * * \param b the right-hand-side of the equation to solve. * * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. * * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. 
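    *
    * A minimal usage sketch (editor's addition; the 4x2 size and random data are
    * illustrative assumptions):
    * \code
    * MatrixXf A = MatrixXf::Random(4, 2);
    * VectorXf b = VectorXf::Random(4);
    * JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);
    * VectorXf x = svd.solve(b); // least-squares solution of A x = b
    * \endcode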
*/ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); return internal::solve_retval(*this, b.derived()); } /** \returns the number of singular values that are not exactly 0 */ Index nonzeroSingularValues() const { eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); return m_nonzeroSingularValues; } /** \returns the rank of the matrix of which \c *this is the SVD. * * \note This method has to determine which singular values should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline Index rank() const { using std::abs; eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); if(m_singularValues.size()==0) return 0; RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold(); Index i = m_nonzeroSingularValues-1; while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i; return i+1; } /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(), * which need to determine when singular values are to be considered nonzero. * This is not used for the SVD decomposition itself. * * When it needs to get the threshold value, Eigen calls threshold(). * The default is \c NumTraits::epsilon() * * \param threshold The new value to use as the threshold. * * A singular value will be considered nonzero if its value is strictly greater than * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$. * * If you want to come back to the default behavior, call setThreshold(Default_t) */ JacobiSVD& setThreshold(const RealScalar& threshold) { m_usePrescribedThreshold = true; m_prescribedThreshold = threshold; return *this; } /** Allows to come back to the default behavior, letting Eigen use its default formula for * determining the threshold. * * You should pass the special object Eigen::Default as parameter here. * \code svd.setThreshold(Eigen::Default); \endcode * * See the documentation of setThreshold(const RealScalar&). */ JacobiSVD& setThreshold(Default_t) { m_usePrescribedThreshold = false; return *this; } /** Returns the threshold that will be used by certain methods such as rank(). * * See the documentation of setThreshold(const RealScalar&). */ RealScalar threshold() const { eigen_assert(m_isInitialized || m_usePrescribedThreshold); return m_usePrescribedThreshold ? 
m_prescribedThreshold : (std::max)(1,m_diagSize)*NumTraits::epsilon(); } inline Index rows() const { return m_rows; } inline Index cols() const { return m_cols; } private: void allocate(Index rows, Index cols, unsigned int computationOptions); protected: MatrixUType m_matrixU; MatrixVType m_matrixV; SingularValuesType m_singularValues; WorkMatrixType m_workMatrix; bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; bool m_computeFullU, m_computeThinU; bool m_computeFullV, m_computeThinV; unsigned int m_computationOptions; Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; RealScalar m_prescribedThreshold; template friend struct internal::svd_precondition_2x2_block_to_be_real; template friend struct internal::qr_preconditioner_impl; internal::qr_preconditioner_impl m_qr_precond_morecols; internal::qr_preconditioner_impl m_qr_precond_morerows; MatrixType m_scaledMatrix; }; template void JacobiSVD::allocate(Index rows, Index cols, unsigned int computationOptions) { eigen_assert(rows >= 0 && cols >= 0); if (m_isAllocated && rows == m_rows && cols == m_cols && computationOptions == m_computationOptions) { return; } m_rows = rows; m_cols = cols; m_isInitialized = false; m_isAllocated = true; m_computationOptions = computationOptions; m_computeFullU = (computationOptions & ComputeFullU) != 0; m_computeThinU = (computationOptions & ComputeThinU) != 0; m_computeFullV = (computationOptions & ComputeFullV) != 0; m_computeThinV = (computationOptions & ComputeThinV) != 0; eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U"); eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V"); eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns."); if (QRPreconditioner == FullPivHouseholderQRPreconditioner) { eigen_assert(!(m_computeThinU || m_computeThinV) && "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " "Use the ColPivHouseholderQR preconditioner instead."); } m_diagSize = (std::min)(m_rows, m_cols); m_singularValues.resize(m_diagSize); if(RowsAtCompileTime==Dynamic) m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); if(ColsAtCompileTime==Dynamic) m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0); m_workMatrix.resize(m_diagSize, m_diagSize); if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); // the scaled copy of the input is only needed when a QR preconditioning step runs, i.e. for non-square inputs if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols); } template JacobiSVD& JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) { using std::abs; allocate(matrix.rows(), matrix.cols(), computationOptions); // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, // only worsening the precision of U and V as we accumulate more rotations const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits::denorm_min(); // Scaling factor to reduce over/under-flows RealScalar scale = matrix.cwiseAbs().maxCoeff(); if(scale==RealScalar(0)) scale = RealScalar(1); /*** step 1.
The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ if(m_rows!=m_cols) { m_scaledMatrix = matrix / scale; m_qr_precond_morecols.run(*this, m_scaledMatrix); m_qr_precond_morerows.run(*this, m_scaledMatrix); } else { m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale; if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize); if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols); if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); } /*** step 2. The main Jacobi SVD iteration. ***/ bool finished = false; while(!finished) { finished = true; // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix for(Index p = 1; p < m_diagSize; ++p) { for(Index q = 0; q < p; ++q) { // if this 2x2 sub-matrix is not diagonal already... // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't // keep us iterating forever. Similarly, small denormal numbers are considered zero. using std::max; RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q)))); // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791) if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold) { finished = false; // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); JacobiRotation j_left, j_right; internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); // accumulate resulting Jacobi rotations m_workMatrix.applyOnTheLeft(p,q,j_left); if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose()); m_workMatrix.applyOnTheRight(p,q,j_right); if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right); } } } } /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/ for(Index i = 0; i < m_diagSize; ++i) { RealScalar a = abs(m_workMatrix.coeff(i,i)); m_singularValues.coeffRef(i) = a; if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a; } /*** step 4. 
Sort singular values in descending order and compute the number of nonzero singular values ***/ m_nonzeroSingularValues = m_diagSize; for(Index i = 0; i < m_diagSize; i++) { Index pos; RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos); if(maxRemainingSingularValue == RealScalar(0)) { m_nonzeroSingularValues = i; break; } if(pos) { pos += i; std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos)); if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i)); if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i)); } } m_singularValues *= scale; m_isInitialized = true; return *this; } namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType; EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs) template void evalTo(Dest& dst) const { eigen_assert(rhs().rows() == dec().rows()); // A = U S V^* // So A^{-1} = V S^{-1} U^* Matrix tmp; Index rank = dec().rank(); tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs(); tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp; dst = dec().matrixV().leftCols(rank) * tmp; } }; } // end namespace internal /** \svd_module * * \return the singular value decomposition of \c *this computed by two-sided * Jacobi transformations. * * \sa class JacobiSVD */ template JacobiSVD::PlainObject> MatrixBase::jacobiSvd(unsigned int computationOptions) const { return JacobiSVD(*this, computationOptions); } } // end namespace Eigen #endif // EIGEN_JACOBISVD_H antimony/lib/fab/vendor/Eigen/src/SVD/JacobiSVD_MKL.h0000644000175000017500000001105613341130426021371 0ustar tiagotiago/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL * Singular Value Decomposition - SVD. 
******************************************************************************** */ #ifndef EIGEN_JACOBISVD_MKL_H #define EIGEN_JACOBISVD_MKL_H #include "Eigen/src/Core/util/MKL_support.h" namespace Eigen { /** \internal Specialization for the data types supported by MKL */ #define EIGEN_MKL_SVD(EIGTYPE, MKLTYPE, MKLRTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \ template<> inline \ JacobiSVD, ColPivHouseholderQRPreconditioner>& \ JacobiSVD, ColPivHouseholderQRPreconditioner>::compute(const Matrix& matrix, unsigned int computationOptions) \ { \ typedef Matrix MatrixType; \ typedef MatrixType::Scalar Scalar; \ typedef MatrixType::RealScalar RealScalar; \ allocate(matrix.rows(), matrix.cols(), computationOptions); \ \ /*const RealScalar precision = RealScalar(2) * NumTraits::epsilon();*/ \ m_nonzeroSingularValues = m_diagSize; \ \ lapack_int lda = matrix.outerStride(), ldu, ldvt; \ lapack_int matrix_order = MKLCOLROW; \ char jobu, jobvt; \ MKLTYPE *u, *vt, dummy; \ jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \ jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \ if (computeU()) { \ ldu = m_matrixU.outerStride(); \ u = (MKLTYPE*)m_matrixU.data(); \ } else { ldu=1; u=&dummy; }\ MatrixType localV; \ ldvt = (m_computeFullV) ? m_cols : (m_computeThinV) ? m_diagSize : 1; \ if (computeV()) { \ localV.resize(ldvt, m_cols); \ vt = (MKLTYPE*)localV.data(); \ } else { ldvt=1; vt=&dummy; }\ Matrix superb; superb.resize(m_diagSize, 1); \ MatrixType m_temp; m_temp = matrix; \ LAPACKE_##MKLPREFIX##gesvd( matrix_order, jobu, jobvt, m_rows, m_cols, (MKLTYPE*)m_temp.data(), lda, (MKLRTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \ if (computeV()) m_matrixV = localV.adjoint(); \ /* for(int i=0;i // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BIDIAGONALIZATION_H #define EIGEN_BIDIAGONALIZATION_H namespace Eigen { namespace internal { // UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API. // At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class. template class UpperBidiagonalization { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTimeMinusOne = internal::decrement_size::ret }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef Matrix RowVectorType; typedef Matrix ColVectorType; typedef BandMatrix BidiagonalType; typedef Matrix DiagVectorType; typedef Matrix SuperDiagVectorType; typedef HouseholderSequence< const MatrixType, CwiseUnaryOp, const Diagonal > > HouseholderUSequenceType; typedef HouseholderSequence< const typename internal::remove_all::type, Diagonal, OnTheRight > HouseholderVSequenceType; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via Bidiagonalization::compute(const MatrixType&). 
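 *
 * A minimal sketch of that two-phase use (illustrative only; this class lives in the
 * internal namespace and is not stable API):
 * \code
 * internal::UpperBidiagonalization<MatrixXd> ubd; // default-constructed, nothing computed yet
 * ubd.compute(A);                                 // A must satisfy A.rows() >= A.cols()
 * \endcode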
*/ UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {} UpperBidiagonalization(const MatrixType& matrix) : m_householder(matrix.rows(), matrix.cols()), m_bidiagonal(matrix.cols(), matrix.cols()), m_isInitialized(false) { compute(matrix); } UpperBidiagonalization& compute(const MatrixType& matrix); const MatrixType& householder() const { return m_householder; } const BidiagonalType& bidiagonal() const { return m_bidiagonal; } const HouseholderUSequenceType householderU() const { eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate()); } const HouseholderVSequenceType householderV() // const here gives nasty errors and I'm lazy { eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>()) .setLength(m_householder.cols()-1) .setShift(1); } protected: MatrixType m_householder; BidiagonalType m_bidiagonal; bool m_isInitialized; }; template UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix) { Index rows = matrix.rows(); Index cols = matrix.cols(); eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); m_householder = matrix; ColVectorType temp(rows); for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k) { Index remainingRows = rows - k; Index remainingCols = cols - k - 1; // construct left householder transform in-place in m_householder m_householder.col(k).tail(remainingRows) .makeHouseholderInPlace(m_householder.coeffRef(k,k), m_bidiagonal.template diagonal<0>().coeffRef(k)); // apply householder transform to remaining part of m_householder on the left m_householder.bottomRightCorner(remainingRows, remainingCols) .applyHouseholderOnTheLeft(m_householder.col(k).tail(remainingRows-1), m_householder.coeff(k,k), temp.data()); if(k == cols-1) break; // construct right householder transform in-place in m_householder m_householder.row(k).tail(remainingCols) .makeHouseholderInPlace(m_householder.coeffRef(k,k+1), m_bidiagonal.template diagonal<1>().coeffRef(k)); // apply householder transform to remaining part of m_householder on the right m_householder.bottomRightCorner(remainingRows-1, remainingCols) .applyHouseholderOnTheRight(m_householder.row(k).tail(remainingCols-1).transpose(), m_householder.coeff(k,k+1), temp.data()); } m_isInitialized = true; return *this; } #if 0 /** \return the upper bidiagonalization of \c *this. * * \sa class Bidiagonalization */ template const UpperBidiagonalization::PlainObject> MatrixBase::bidiagonalization() const { return UpperBidiagonalization(eval()); } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_BIDIAGONALIZATION_H antimony/lib/fab/vendor/Eigen/src/SparseQR/0000755000175000017500000000000013341130426020012 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/SparseQR/CMakeLists.txt0000644000175000017500000000023213341130426022547 0ustar tiagotiagoFILE(GLOB Eigen_SparseQR_SRCS "*.h") INSTALL(FILES ${Eigen_SparseQR_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseQR/ COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/SparseQR/SparseQR.h0000644000175000017500000006352113341130426021672 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra.
// // Copyright (C) 2012-2013 Desire Nuentsa // Copyright (C) 2012-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_QR_H #define EIGEN_SPARSE_QR_H namespace Eigen { template class SparseQR; template struct SparseQRMatrixQReturnType; template struct SparseQRMatrixQTransposeReturnType; template struct SparseQR_QProduct; namespace internal { template struct traits > { typedef typename SparseQRType::MatrixType ReturnType; typedef typename ReturnType::Index Index; typedef typename ReturnType::StorageKind StorageKind; }; template struct traits > { typedef typename SparseQRType::MatrixType ReturnType; }; template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } // End namespace internal /** * \ingroup SparseQR_Module * \class SparseQR * \brief Sparse left-looking rank-revealing QR factorization * * This class implements a left-looking rank-revealing QR decomposition * of sparse matrices. When a column has a norm less than a given tolerance * it is implicitly permuted to the end. The QR factorization thus obtained is * given by A*P = Q*R where R is upper triangular or trapezoidal. * * P is the column permutation which is the product of the fill-reducing and the * rank-revealing permutations. Use colsPermutation() to get it. * * Q is the orthogonal matrix represented as products of Householder reflectors. * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose. * You can then apply it to a vector. * * R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient. * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank. * * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module * OrderingMethods \endlink module for the list of built-in and external ordering methods. * * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()). * */ template class SparseQR { public: typedef _MatrixType MatrixType; typedef _OrderingType OrderingType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef SparseMatrix QRMatrixType; typedef Matrix IndexVector; typedef Matrix ScalarVector; typedef PermutationMatrix PermutationType; public: SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { } /** Construct a QR factorization of the matrix \a mat. * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * \sa compute() */ SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { compute(mat); } /** Computes the QR factorization of the sparse matrix \a mat. * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * \sa analyzePattern(), factorize() */ void compute(const MatrixType& mat) { analyzePattern(mat); factorize(mat); } void analyzePattern(const MatrixType& mat); void factorize(const MatrixType& mat); /** \returns the number of rows of the represented matrix.
*/ inline Index rows() const { return m_pmat.rows(); } /** \returns the number of columns of the represented matrix. */ inline Index cols() const { return m_pmat.cols();} /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. */ const QRMatrixType& matrixR() const { return m_R; } /** \returns the number of non linearly dependent columns as determined by the pivoting threshold. * * \sa setPivotThreshold() */ Index rank() const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); return m_nonzeropivots; } /** \returns an expression of the matrix Q as products of sparse Householder reflectors. * The common usage of this function is to apply it to a dense matrix or vector * \code * VectorXd B1, B2; * // Initialize B1 * B2 = matrixQ() * B1; * \endcode * * To get a plain SparseMatrix representation of Q: * \code * SparseMatrix Q; * Q = SparseQR >(A).matrixQ(); * \endcode * Internally, this call simply performs a sparse product between the matrix Q * and a sparse identity matrix. However, due to the fact that the sparse * reflectors are stored unsorted, two transpositions are needed to sort * them before performing the product. */ SparseQRMatrixQReturnType matrixQ() const { return SparseQRMatrixQReturnType(*this); } /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R * It is the combination of the fill-in reducing permutation and numerical column pivoting. */ const PermutationType& colsPermutation() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_outputPerm_c; } /** \returns A string describing the type of error. * This method is provided to ease debugging, not to handle errors. */ std::string lastErrorMessage() const { return m_lastError; } /** \internal */ template bool _solve(const MatrixBase &B, MatrixBase &dest) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); Index rank = this->rank(); // Compute Q^T * b; typename Dest::PlainObject y, b; y = this->matrixQ().transpose() * B; b = y; // Solve with the triangular matrix R y.resize((std::max)(cols(),Index(y.rows())),y.cols()); y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView().solve(b.topRows(rank)); y.bottomRows(y.rows()-rank).setZero(); // Apply the column permutation if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols()); else dest = y.topRows(cols()); m_info = Success; return true; } /** Sets the threshold that is used to determine linearly dependent columns during the factorization. * * In practice, if during the factorization the norm of the column that has to be eliminated is below * this threshold, then the entire column is treated as zero, and it is moved at the end. */ void setPivotThreshold(const RealScalar& threshold) { m_useDefaultThreshold = false; m_threshold = threshold; } /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. 
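 *
 * A minimal usage sketch (the names are illustrative, not part of this file):
 * \code
 * SparseMatrix<double> A;                               // fill A, then:
 * A.makeCompressed();                                   // SparseQR requires compressed storage
 * VectorXd b;                                           // right-hand side with b.rows() == A.rows()
 * SparseQR<SparseMatrix<double>, COLAMDOrdering<int> > solver(A);
 * VectorXd x = solver.solve(b);                         // least-squares solution of A x = b
 * \endcode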
* * \sa compute() */ template inline const internal::solve_retval solve(const MatrixBase& B) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return internal::solve_retval(*this, B.derived()); } template inline const internal::sparse_solve_retval solve(const SparseMatrixBase& B) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return internal::sparse_solve_retval(*this, B.derived()); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was successful, * \c NumericalIssue if the QR factorization reports a numerical problem * \c InvalidInput if the input matrix is invalid * * \sa iparm() */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } protected: inline void sort_matrix_Q() { if(this->m_isQSorted) return; // The matrix Q is sorted during the transposition SparseMatrix mQrm(this->m_Q); this->m_Q = mQrm; this->m_isQSorted = true; } protected: bool m_isInitialized; bool m_analysisIsok; bool m_factorizationIsok; mutable ComputationInfo m_info; std::string m_lastError; QRMatrixType m_pmat; // Temporary matrix QRMatrixType m_R; // The triangular factor matrix QRMatrixType m_Q; // The orthogonal reflectors ScalarVector m_hcoeffs; // The Householder coefficients PermutationType m_perm_c; // Fill-reducing Column permutation PermutationType m_pivotperm; // The permutation for rank revealing PermutationType m_outputPerm_c; // The final column permutation RealScalar m_threshold; // Threshold to determine null Householder reflections bool m_useDefaultThreshold; // Use default threshold Index m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row bool m_isQSorted; // whether Q is sorted or not bool m_isEtreeOk; // whether the elimination tree match the initial input matrix template friend struct SparseQR_QProduct; template friend struct SparseQRMatrixQReturnType; }; /** \brief Preprocessing step of a QR factorization * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * In this step, the fill-reducing permutation is computed and applied to the columns of A * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited. * * \note In this step it is assumed that there is no empty row in the matrix \a mat. */ template void SparseQR::analyzePattern(const MatrixType& mat) { eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. 
Call .makeCompressed() before passing it to SparseQR"); // Copy to a column major matrix if the input is rowmajor typename internal::conditional::type matCpy(mat); // Compute the column fill reducing ordering OrderingType ord; ord(matCpy, m_perm_c); Index n = mat.cols(); Index m = mat.rows(); Index diagSize = (std::min)(m,n); if (!m_perm_c.size()) { m_perm_c.resize(n); m_perm_c.indices().setLinSpaced(n, 0,n-1); } // Compute the column elimination tree of the permuted matrix m_outputPerm_c = m_perm_c.inverse(); internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); m_isEtreeOk = true; m_R.resize(m, n); m_Q.resize(m, diagSize); // Allocate space for nonzero elements : rough estimation m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree m_Q.reserve(2*mat.nonZeros()); m_hcoeffs.resize(diagSize); m_analysisIsok = true; } /** \brief Performs the numerical QR factorization of the input matrix * * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with * a matrix having the same sparsity pattern than \a mat. * * \param mat The sparse column-major matrix */ template void SparseQR::factorize(const MatrixType& mat) { using std::abs; using std::max; eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); Index m = mat.rows(); Index n = mat.cols(); Index diagSize = (std::min)(m,n); IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q ScalarVector tval(m); // The dense vector used to compute the current column RealScalar pivotThreshold = m_threshold; m_R.setZero(); m_Q.setZero(); m_pmat = mat; if(!m_isEtreeOk) { m_outputPerm_c = m_perm_c.inverse(); internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); m_isEtreeOk = true; } m_pmat.uncompress(); // To have the innerNonZeroPtr allocated // Apply the fill-in reducing permutation lazily: { // If the input is row major, copy the original column indices, // otherwise directly use the input matrix // IndexVector originalOuterIndicesCpy; const Index *originalOuterIndices = mat.outerIndexPtr(); if(MatrixType::IsRowMajor) { originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); originalOuterIndices = originalOuterIndicesCpy.data(); } for (int i = 0; i < n; i++) { Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; } } /* Compute the default threshold as in MatLab, see: * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing * Sparse QR Factorization, ACM Trans. on Math. Soft. 
38(1), 2011, Page 8:3 */ if(m_useDefaultThreshold) { RealScalar max2Norm = 0.0; for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm()); if(max2Norm==RealScalar(0)) max2Norm = RealScalar(1); pivotThreshold = 20 * (m + n) * max2Norm * NumTraits::epsilon(); } // Initialize the numerical permutation m_pivotperm.setIdentity(n); Index nonzeroCol = 0; // Record the number of valid pivots m_Q.startVec(0); // Left looking rank-revealing QR factorization: compute a column of R and Q at a time for (Index col = 0; col < n; ++col) { mark.setConstant(-1); m_R.startVec(col); mark(nonzeroCol) = col; Qidx(0) = nonzeroCol; nzcolR = 0; nzcolQ = 1; bool found_diag = nonzeroCol>=m; tval.setZero(); // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e., // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k. // Note: if the diagonal entry does not exist, then its contribution must be explicitly added, // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { Index curIdx = nonzeroCol; if(itp) curIdx = itp.row(); if(curIdx == nonzeroCol) found_diag = true; // Get the nonzeros indexes of the current column of R Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here if (st < 0 ) { m_lastError = "Empty row found during numerical factorization"; m_info = InvalidInput; return; } // Traverse the etree Index bi = nzcolR; for (; mark(st) != col; st = m_etree(st)) { Ridx(nzcolR) = st; // Add this row to the list, mark(st) = col; // and mark this row as visited nzcolR++; } // Reverse the list to get the topological ordering Index nt = nzcolR-bi; for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1)); // Copy the current (curIdx,pcol) value of the input matrix if(itp) tval(curIdx) = itp.value(); else tval(curIdx) = Scalar(0); // Compute the pattern of Q(:,k) if(curIdx > nonzeroCol && mark(curIdx) != col ) { Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q, mark(curIdx) = col; // and mark it as visited nzcolQ++; } } // Browse all the indexes of R(:,col) in reverse order for (Index i = nzcolR-1; i >= 0; i--) { Index curIdx = Ridx(i); // Apply the curIdx-th householder vector to the current column (temporarily stored into tval) Scalar tdot(0); // First compute q' * tval tdot = m_Q.col(curIdx).dot(tval); tdot *= m_hcoeffs(curIdx); // Then update tval = tval - q * tau // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse") for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) tval(itq.row()) -= itq.value() * tdot; // Detect fill-in for the current column of Q if(m_etree(Ridx(i)) == nonzeroCol) { for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) { Index iQ = itq.row(); if (mark(iQ) != col) { Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, mark(iQ) = col; // and mark it as visited } } } } // End update current column Scalar tau = 0; RealScalar beta = 0; if(nonzeroCol < diagSize) { // Compute the Householder reflection that eliminate the current column // FIXME this step should call the Householder module. Scalar c0 = nzcolQ ? 
tval(Qidx(0)) : Scalar(0); // First, the squared norm of Q((col+1):m, col) RealScalar sqrNorm = 0.; for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) { beta = numext::real(c0); tval(Qidx(0)) = 1; } else { using std::sqrt; beta = sqrt(numext::abs2(c0) + sqrNorm); if(numext::real(c0) >= RealScalar(0)) beta = -beta; tval(Qidx(0)) = 1; for (Index itq = 1; itq < nzcolQ; ++itq) tval(Qidx(itq)) /= (c0 - beta); tau = numext::conj((beta-c0) / beta); } } // Insert values in R for (Index i = nzcolR-1; i >= 0; i--) { Index curIdx = Ridx(i); if(curIdx < nonzeroCol) { m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx); tval(curIdx) = Scalar(0.); } } if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold) { m_R.insertBackByOuterInner(col, nonzeroCol) = beta; // The householder coefficient m_hcoeffs(nonzeroCol) = tau; // Record the householder reflections for (Index itq = 0; itq < nzcolQ; ++itq) { Index iQ = Qidx(itq); m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ); tval(iQ) = Scalar(0.); } nonzeroCol++; if(nonzeroCol struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef SparseQR<_MatrixType,OrderingType> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; template struct sparse_solve_retval, Rhs> : sparse_solve_retval_base, Rhs> { typedef SparseQR<_MatrixType, OrderingType> Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec, Rhs) template void evalTo(Dest& dst) const { this->defaultEvalTo(dst); } }; } // end namespace internal template struct SparseQR_QProduct : ReturnByValue > { typedef typename SparseQRType::QRMatrixType MatrixType; typedef typename SparseQRType::Scalar Scalar; typedef typename SparseQRType::Index Index; // Get the references SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) : m_qr(qr),m_other(other),m_transpose(transpose) {} inline Index rows() const { return m_transpose ? 
m_qr.rows() : m_qr.cols(); } inline Index cols() const { return m_other.cols(); } // Assign to a vector template void evalTo(DesType& res) const { Index m = m_qr.rows(); Index n = m_qr.cols(); Index diagSize = (std::min)(m,n); res = m_other; if (m_transpose) { eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); //Compute res = Q' * other column by column for(Index j = 0; j < res.cols(); j++){ for (Index k = 0; k < diagSize; k++) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } } } else { eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); // Compute res = Q * other column by column for(Index j = 0; j < res.cols(); j++) { for (Index k = diagSize-1; k >=0; k--) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } } } } const SparseQRType& m_qr; const Derived& m_other; bool m_transpose; }; template struct SparseQRMatrixQReturnType : public EigenBase > { typedef typename SparseQRType::Index Index; typedef typename SparseQRType::Scalar Scalar; typedef Matrix DenseMatrix; SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} template SparseQR_QProduct operator*(const MatrixBase& other) { return SparseQR_QProduct(m_qr,other.derived(),false); } SparseQRMatrixQTransposeReturnType adjoint() const { return SparseQRMatrixQTransposeReturnType(m_qr); } inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } // To use for operations with the transpose of Q SparseQRMatrixQTransposeReturnType transpose() const { return SparseQRMatrixQTransposeReturnType(m_qr); } template void evalTo(MatrixBase& dest) const { dest.derived() = m_qr.matrixQ() * Dest::Identity(m_qr.rows(), m_qr.rows()); } template void evalTo(SparseMatrixBase& dest) const { Dest idMat(m_qr.rows(), m_qr.rows()); idMat.setIdentity(); // Sort the sparse householder reflectors if needed const_cast(&m_qr)->sort_matrix_Q(); dest.derived() = SparseQR_QProduct(m_qr, idMat, false); } const SparseQRType& m_qr; }; template struct SparseQRMatrixQTransposeReturnType { SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {} template SparseQR_QProduct operator*(const MatrixBase& other) { return SparseQR_QProduct(m_qr,other.derived(), true); } const SparseQRType& m_qr; }; } // end namespace Eigen #endif antimony/lib/fab/vendor/Eigen/src/PaStiXSupport/0000755000175000017500000000000013341130426021057 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/PaStiXSupport/CMakeLists.txt0000644000175000017500000000025113341130426023615 0ustar tiagotiagoFILE(GLOB Eigen_PastixSupport_SRCS "*.h") INSTALL(FILES ${Eigen_PastixSupport_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PaStiXSupport COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h0000644000175000017500000005545413341130426024012 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_PASTIXSUPPORT_H #define EIGEN_PASTIXSUPPORT_H namespace Eigen { /** \ingroup PaStiXSupport_Module * \brief Interface to the PaStix solver * * This class is used to solve the linear systems A.X = B via the PaStix library. * The matrix can be either real or complex, symmetric or not. * * \sa TutorialSparseDirectSolvers */ template class PastixLU; template class PastixLLT; template class PastixLDLT; namespace internal { template struct pastix_traits; template struct pastix_traits< PastixLU<_MatrixType> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::Index Index; }; template struct pastix_traits< PastixLLT<_MatrixType,Options> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::Index Index; }; template struct pastix_traits< PastixLDLT<_MatrixType,Options> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::Index Index; }; void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); } // Convert the matrix to Fortran-style Numbering template void c_to_fortran_numbering (MatrixType& mat) { if ( !(mat.outerIndexPtr()[0]) ) { int i; for(i = 0; i <= mat.rows(); ++i) ++mat.outerIndexPtr()[i]; for(i = 0; i < mat.nonZeros(); ++i) ++mat.innerIndexPtr()[i]; } } // Convert to C-style Numbering template void fortran_to_c_numbering (MatrixType& mat) { // Check the Numbering if ( mat.outerIndexPtr()[0] == 1 ) { // Convert to C-style numbering int i; for(i = 0; i <= mat.rows(); ++i) --mat.outerIndexPtr()[i]; for(i = 0; i < mat.nonZeros(); ++i) --mat.innerIndexPtr()[i]; } } } // This is the base class to interface with PaStiX functions. // Users should not used this class directly. 
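// End-user code instead goes through the derived solvers; a minimal sketch (illustrative,
// assuming the PaStiX library itself is installed and linked):
//   PastixLU<SparseMatrix<double> > solver;
//   solver.compute(A);            // runs analyzePattern(A) then factorize(A)
//   VectorXd x = solver.solve(b); // solves A x = b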
template class PastixBase : internal::noncopyable { public: typedef typename internal::pastix_traits::MatrixType _MatrixType; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef Matrix Vector; typedef SparseMatrix ColSpMatrix; public: PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false), m_pastixdata(0), m_size(0) { init(); } ~PastixBase() { clean(); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * \sa compute() */ template inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "Pastix solver is not initialized."); eigen_assert(rows()==b.rows() && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); return internal::solve_retval(*this, b.derived()); } template bool _solve (const MatrixBase &b, MatrixBase &x) const; Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } /** Returns a reference to the integer vector IPARM of PaStiX parameters * to modify the default parameters. * The statistics related to the different phases of factorization and solve are saved here as well * \sa analyzePattern() factorize() */ Array& iparm() { return m_iparm; } /** Return a reference to a particular index parameter of the IPARM vector * \sa iparm() */ int& iparm(int idxparam) { return m_iparm(idxparam); } /** Returns a reference to the double vector DPARM of PaStiX parameters * The statistics related to the different phases of factorization and solve are saved here as well * \sa analyzePattern() factorize() */ Array& dparm() { return m_dparm; } /** Return a reference to a particular index parameter of the DPARM vector * \sa dparm() */ double& dparm(int idxparam) { return m_dparm(idxparam); } inline Index cols() const { return m_size; } inline Index rows() const { return m_size; } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the PaStiX reports a problem * \c InvalidInput if the input matrix is invalid * * \sa iparm() */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
* * \sa compute() */ template inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const { eigen_assert(m_isInitialized && "Pastix LU, LLT or LDLT is not initialized."); eigen_assert(rows()==b.rows() && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); return internal::sparse_solve_retval(*this, b.derived()); } protected: // Initialize the Pastix data structure, check the matrix void init(); // Compute the ordering and the symbolic factorization void analyzePattern(ColSpMatrix& mat); // Compute the numerical factorization void factorize(ColSpMatrix& mat); // Free all the data allocated by Pastix void clean() { eigen_assert(m_initisOk && "The Pastix structure should be allocated first"); m_iparm(IPARM_START_TASK) = API_TASK_CLEAN; m_iparm(IPARM_END_TASK) = API_TASK_CLEAN; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); } void compute(ColSpMatrix& mat); int m_initisOk; int m_analysisIsOk; int m_factorizationIsOk; bool m_isInitialized; mutable ComputationInfo m_info; mutable pastix_data_t *m_pastixdata; // Data structure for pastix mutable int m_comm; // The MPI communicator identifier mutable Matrix m_iparm; // integer vector for the input parameters mutable Matrix m_dparm; // Scalar vector for the input parameters mutable Matrix m_perm; // Permutation vector mutable Matrix m_invp; // Inverse permutation vector mutable int m_size; // Size of the matrix }; /** Initialize the PaStiX data structure. *A first call to this function fills iparm and dparm with the default PaStiX parameters * \sa iparm() dparm() */ template void PastixBase::init() { m_size = 0; m_iparm.setZero(IPARM_SIZE); m_dparm.setZero(DPARM_SIZE); m_iparm(IPARM_MODIFY_PARAMETER) = API_NO; pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, 0, 0, 0, 0, 1, m_iparm.data(), m_dparm.data()); m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO; m_iparm[IPARM_VERBOSE] = 2; m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH; m_iparm[IPARM_INCOMPLETE] = API_NO; m_iparm[IPARM_OOC_LIMIT] = 2000; m_iparm[IPARM_RHS_MAKING] = API_RHS_B; m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; m_iparm(IPARM_START_TASK) = API_TASK_INIT; m_iparm(IPARM_END_TASK) = API_TASK_INIT; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, 0, 0, 0, 0, m_iparm.data(), m_dparm.data()); // Check the returned error if(m_iparm(IPARM_ERROR_NUMBER)) { m_info = InvalidInput; m_initisOk = false; } else { m_info = Success; m_initisOk = true; } } template void PastixBase::compute(ColSpMatrix& mat) { eigen_assert(mat.rows() == mat.cols() && "The input matrix should be squared"); analyzePattern(mat); factorize(mat); m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; m_isInitialized = m_factorizationIsOk; } template void PastixBase::analyzePattern(ColSpMatrix& mat) { eigen_assert(m_initisOk && "The initialization of PaSTiX failed"); // clean previous calls if(m_size>0) clean(); m_size = mat.rows(); m_perm.resize(m_size); m_invp.resize(m_size); m_iparm(IPARM_START_TASK) = API_TASK_ORDERING; m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); // Check the returned error if(m_iparm(IPARM_ERROR_NUMBER)) { m_info = NumericalIssue; m_analysisIsOk = false; } else { m_info = Success; m_analysisIsOk = true; } } template void PastixBase::factorize(ColSpMatrix& mat) { // 
if(&m_cpyMat != &mat) m_cpyMat = mat; eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase"); m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT; m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT; m_size = mat.rows(); internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); // Check the returned error if(m_iparm(IPARM_ERROR_NUMBER)) { m_info = NumericalIssue; m_factorizationIsOk = false; m_isInitialized = false; } else { m_info = Success; m_factorizationIsOk = true; m_isInitialized = true; } } /* Solve the system */ template template bool PastixBase::_solve (const MatrixBase &b, MatrixBase &x) const { eigen_assert(m_isInitialized && "The matrix should be factorized first"); EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); int rhs = 1; x = b; /* on return, x is overwritten by the computed solution */ for (int i = 0; i < b.cols(); i++){ m_iparm[IPARM_START_TASK] = API_TASK_SOLVE; m_iparm[IPARM_END_TASK] = API_TASK_REFINE; internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0, m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data()); } // Check the returned error m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue; return m_iparm(IPARM_ERROR_NUMBER)==0; } /** \ingroup PaStiXSupport_Module * \class PastixLU * \brief Sparse direct LU solver based on PaStiX library * * This class is used to solve the linear systems A.X = B with a supernodal LU * factorization in the PaStiX library. The matrix A should be squared and nonsingular * PaStiX requires that the matrix A has a symmetric structural pattern. * This interface can symmetrize the input matrix otherwise. * The vectors or matrices X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false * NOTE : Note that if the analysis and factorization phase are called separately, * the input matrix will be symmetrized at each call, hence it is advised to * symmetrize the matrix in a end-user program and set \p IsStrSym to true * * \sa \ref TutorialSparseDirectSolvers * */ template class PastixLU : public PastixBase< PastixLU<_MatrixType> > { public: typedef _MatrixType MatrixType; typedef PastixBase > Base; typedef typename Base::ColSpMatrix ColSpMatrix; typedef typename MatrixType::Index Index; public: PastixLU() : Base() { init(); } PastixLU(const MatrixType& matrix):Base() { init(); compute(matrix); } /** Compute the LU supernodal factorization of \p matrix. * iparm and dparm can be used to tune the PaStiX parameters. * see the PaStiX user's manual * \sa analyzePattern() factorize() */ void compute (const MatrixType& matrix) { m_structureIsUptodate = false; ColSpMatrix temp; grabMatrix(matrix, temp); Base::compute(temp); } /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern. * Several ordering methods can be used at this step. See the PaStiX user's manual. 
* The result of this operation can be used with successive matrices having the same pattern as \p matrix * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { m_structureIsUptodate = false; ColSpMatrix temp; grabMatrix(matrix, temp); Base::analyzePattern(temp); } /** Compute the LU supernodal factorization of \p matrix * WARNING The matrix \p matrix should have the same structural pattern * as the same used in the analysis phase. * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::factorize(temp); } protected: void init() { m_structureIsUptodate = false; m_iparm(IPARM_SYM) = API_SYM_NO; m_iparm(IPARM_FACTORIZATION) = API_FACT_LU; } void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) { if(IsStrSym) out = matrix; else { if(!m_structureIsUptodate) { // update the transposed structure m_transposedStructure = matrix.transpose(); // Set the elements of the matrix to zero for (Index j=0; j * \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX * * \sa \ref TutorialSparseDirectSolvers */ template class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> > { public: typedef _MatrixType MatrixType; typedef PastixBase > Base; typedef typename Base::ColSpMatrix ColSpMatrix; public: enum { UpLo = _UpLo }; PastixLLT() : Base() { init(); } PastixLLT(const MatrixType& matrix):Base() { init(); compute(matrix); } /** Compute the L factor of the LL^T supernodal factorization of \p matrix * \sa analyzePattern() factorize() */ void compute (const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::compute(temp); } /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern * The result of this operation can be used with successive matrices having the same pattern as \p matrix * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::analyzePattern(temp); } /** Compute the LL^T supernodal numerical factorization of \p matrix * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::factorize(temp); } protected: using Base::m_iparm; void init() { m_iparm(IPARM_SYM) = API_SYM_YES; m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT; } void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) { // Pastix supports only lower, column-major matrices out.template selfadjointView() = matrix.template selfadjointView(); internal::c_to_fortran_numbering(out); } }; /** \ingroup PaStiXSupport_Module * \class PastixLDLT * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library * * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization * available in the PaStiX library. The matrix A should be symmetric and positive definite * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX * The vectors or matrices X and B can be either dense or sparse * * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo The part of the matrix to use : Lower or Upper. 
The default is Lower as required by PaStiX * * \sa \ref TutorialSparseDirectSolvers */ template class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> > { public: typedef _MatrixType MatrixType; typedef PastixBase > Base; typedef typename Base::ColSpMatrix ColSpMatrix; public: enum { UpLo = _UpLo }; PastixLDLT():Base() { init(); } PastixLDLT(const MatrixType& matrix):Base() { init(); compute(matrix); } /** Compute the L and D factors of the LDL^T factorization of \p matrix * \sa analyzePattern() factorize() */ void compute (const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::compute(temp); } /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern * The result of this operation can be used with successive matrices having the same pattern as \p matrix * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::analyzePattern(temp); } /** Compute the LDL^T supernodal numerical factorization of \p matrix * */ void factorize(const MatrixType& matrix) { ColSpMatrix temp; grabMatrix(matrix, temp); Base::factorize(temp); } protected: using Base::m_iparm; void init() { m_iparm(IPARM_SYM) = API_SYM_YES; m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT; } void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) { // Pastix supports only lower, column-major matrices out.template selfadjointView() = matrix.template selfadjointView(); internal::c_to_fortran_numbering(out); } }; namespace internal { template struct solve_retval, Rhs> : solve_retval_base, Rhs> { typedef PastixBase<_MatrixType> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); } }; template struct sparse_solve_retval, Rhs> : sparse_solve_retval_base, Rhs> { typedef PastixBase<_MatrixType> Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const { this->defaultEvalTo(dst); } }; } // end namespace internal } // end namespace Eigen #endif antimony/lib/fab/vendor/Eigen/src/SPQRSupport/0000755000175000017500000000000013341130426020474 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/SPQRSupport/CMakeLists.txt0000644000175000017500000000024313341130426023233 0ustar tiagotiagoFILE(GLOB Eigen_SPQRSupport_SRCS "*.h") INSTALL(FILES ${Eigen_SPQRSupport_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SPQRSupport/ COMPONENT Devel ) antimony/lib/fab/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h0000644000175000017500000002527013341130426024762 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire Nuentsa // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
#ifndef EIGEN_SUITESPARSEQRSUPPORT_H #define EIGEN_SUITESPARSEQRSUPPORT_H namespace Eigen { template class SPQR; template struct SPQRMatrixQReturnType; template struct SPQRMatrixQTransposeReturnType; template struct SPQR_QProduct; namespace internal { template struct traits > { typedef typename SPQRType::MatrixType ReturnType; }; template struct traits > { typedef typename SPQRType::MatrixType ReturnType; }; template struct traits > { typedef typename Derived::PlainObject ReturnType; }; } // End namespace internal /** * \ingroup SPQRSupport_Module * \class SPQR * \brief Sparse QR factorization based on the SuiteSparseQR library * * This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition * of sparse matrices. The result is then used to solve linear least-squares systems. * A QR factorization is computed such that A*P = Q*R, where: * * P is the column permutation. Use colsPermutation() to get it. * * Q is the orthogonal matrix represented as Householder reflectors. * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose. * You can then apply it to a vector. * * R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix. * NOTE : The Index type of R is always UF_long. You can get it with SPQR::Index. * * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> * */ template class SPQR { public: typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef UF_long Index; typedef SparseMatrix MatrixType; typedef PermutationMatrix PermutationType; public: SPQR() : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits::epsilon()) { cholmod_l_start(&m_cc); } SPQR(const _MatrixType& matrix) : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits::epsilon()) { cholmod_l_start(&m_cc); compute(matrix); } ~SPQR() { SPQR_free(); cholmod_l_finish(&m_cc); } void SPQR_free() { cholmod_l_free_sparse(&m_H, &m_cc); cholmod_l_free_sparse(&m_cR, &m_cc); cholmod_l_free_dense(&m_HTau, &m_cc); std::free(m_E); std::free(m_HPinv); } void compute(const _MatrixType& matrix) { if(m_isInitialized) SPQR_free(); MatrixType mat(matrix); cholmod_sparse A; A = viewAsCholmod(mat); Index col = matrix.cols(); m_rank = SuiteSparseQR(m_ordering, m_tolerance, col, &A, &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc); if (!m_cR) { m_info = NumericalIssue; m_isInitialized = false; return; } m_info = Success; m_isInitialized = true; m_isRUpToDate = false; } /** * Get the number of rows of the input matrix and the Q matrix */ inline Index rows() const {return m_H->nrow; } /** * Get the number of columns of the input matrix. */ inline Index cols() const { return m_cR->ncol; } /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
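 *
 * A minimal usage sketch (illustrative names; assumes SuiteSparseQR is available):
 * \code
 * SparseMatrix<double> A;       // column-major sparse input
 * VectorXd b;                   // b.rows() == A.rows()
 * SPQR<SparseMatrix<double> > spqr(A);
 * VectorXd x = spqr.solve(b);   // least-squares solution of A x = b
 * \endcode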
      *
      * \sa compute()
      */
    template<typename Rhs>
    inline const internal::solve_retval<SPQR, Rhs> solve(const MatrixBase<Rhs>& B) const
    {
      eigen_assert(m_isInitialized && "The QR factorization should be computed first, call compute()");
      eigen_assert(this->rows()==B.rows()
                   && "SPQR::solve(): invalid number of rows of the right hand side matrix B");
      return internal::solve_retval<SPQR, Rhs>(*this, B.derived());
    }

    template<typename Rhs, typename Dest>
    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
    {
      eigen_assert(m_isInitialized && "The QR factorization should be computed first, call compute()");
      eigen_assert(b.cols()==1 && "This method is for vectors only");

      // Compute Q^T * b
      typename Dest::PlainObject y;
      y = matrixQ().transpose() * b;
      // Solve with the triangular matrix R
      Index rk = this->rank();
      y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
      y.bottomRows(cols()-rk).setZero();
      // Apply the column permutation
      dest.topRows(cols()) = colsPermutation() * y.topRows(cols());

      m_info = Success;
    }

    /** \returns the sparse triangular factor R. It is a sparse matrix
      */
    const MatrixType matrixR() const
    {
      eigen_assert(m_isInitialized && "The QR factorization should be computed first, call compute()");
      if(!m_isRUpToDate) {
        m_R = viewAsEigen<Scalar, ColMajor, Index>(*m_cR);
        m_isRUpToDate = true;
      }
      return m_R;
    }
    /// Get an expression of the matrix Q
    SPQRMatrixQReturnType<SPQR> matrixQ() const
    {
      return SPQRMatrixQReturnType<SPQR>(*this);
    }
    /// Get the permutation that was applied to columns of A
    PermutationType colsPermutation() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      Index n = m_cR->ncol;
      PermutationType colsPerm(n);
      for(Index j = 0; j < n; j++) colsPerm.indices()(j) = m_E[j];
      return colsPerm;
    }
    /**
      * Gets the rank of the matrix.
      * It should be equal to matrixQR().cols if the matrix is full-rank
      */
    Index rank() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_cc.SPQR_istat[4];
    }
    /// Set the fill-reducing ordering method to be used
    void setSPQROrdering(int ord) { m_ordering = ord; }
    /// Set the tolerance tol to treat columns with 2-norm <= tol as zero
    void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }

    /** \returns a pointer to the SPQR workspace */
    cholmod_common *cholmodCommon() const { return &m_cc; }

    /** \brief Reports whether the previous computation was successful.
      *
      * \returns \c Success if the computation was successful,
      *          \c NumericalIssue if the sparse QR factorization failed
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_info;
    }
  protected:
    bool m_isInitialized;
    mutable bool m_isRUpToDate;
    mutable ComputationInfo m_info;
    int m_ordering;                 // Ordering method to use, see SPQR's manual
    int m_allow_tol;                // Allow to use some tolerance during numerical factorization
    RealScalar m_tolerance;         // Treat columns with 2-norm below this tolerance as zero
    mutable cholmod_sparse *m_cR;   // The sparse R factor in cholmod format
    mutable MatrixType m_R;         // The sparse matrix R in Eigen format
    mutable Index *m_E;             // The permutation applied to columns
    mutable cholmod_sparse *m_H;    // The Householder vectors
    mutable Index *m_HPinv;         // The row permutation of H
    mutable cholmod_dense *m_HTau;  // The Householder coefficients
    mutable Index m_rank;           // The rank of the matrix
    mutable cholmod_common m_cc;    // Workspace and parameters
    template<typename, typename> friend struct SPQR_QProduct;
};

template<typename SPQRType, typename Derived>
struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType, Derived> >
{
  typedef typename SPQRType::Scalar Scalar;
  typedef typename SPQRType::Index Index;
  // Define the constructor to get reference to argument types
  SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose)
    : m_spqr(spqr), m_other(other), m_transpose(transpose) {}

  inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
  inline Index cols() const { return m_other.cols(); }
  // Assign to a vector
  template<typename ResType>
  void evalTo(ResType& res) const
  {
    cholmod_dense y_cd;
    cholmod_dense *x_cd;
    int method = m_transpose ? SPQR_QTX : SPQR_QX;
    cholmod_common *cc = m_spqr.cholmodCommon();
    y_cd = viewAsCholmod(m_other.const_cast_derived());
    x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);
    res = Matrix<Scalar, Dynamic, Dynamic>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);
    cholmod_l_free_dense(&x_cd, cc);
  }
  const SPQRType& m_spqr;
  const Derived& m_other;
  bool m_transpose;
};

template<typename SPQRType>
struct SPQRMatrixQReturnType{

  SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
  template<typename Derived>
  SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
  {
    return SPQR_QProduct<SPQRType, Derived>(m_spqr, other.derived(), false);
  }
  SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const
  {
    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
  }
  // To use for operations with the transpose of Q
  SPQRMatrixQTransposeReturnType<SPQRType> transpose() const
  {
    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
  }
  const SPQRType& m_spqr;
};

template<typename SPQRType>
struct SPQRMatrixQTransposeReturnType{
  SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
  template<typename Derived>
  SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
  {
    return SPQR_QProduct<SPQRType, Derived>(m_spqr, other.derived(), true);
  }
  const SPQRType& m_spqr;
};

namespace internal {

template<typename _MatrixType, typename Rhs>
struct solve_retval<SPQR<_MatrixType>, Rhs>
  : solve_retval_base<SPQR<_MatrixType>, Rhs>
{
  typedef SPQR<_MatrixType> Dec;
  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    dec()._solve(rhs(),dst);
  }
};

} // end namespace internal

} // End namespace Eigen
#endif
antimony/lib/fab/vendor/Eigen/src/Core/0000755000175000017500000000000013341130426017202 5ustar tiagotiagoantimony/lib/fab/vendor/Eigen/src/Core/CMakeLists.txt0000644000175000017500000000032713341130426021744 0ustar tiagotiagoFILE(GLOB Eigen_Core_SRCS "*.h")

INSTALL(FILES
  ${Eigen_Core_SRCS}
  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core
  COMPONENT Devel
  )

ADD_SUBDIRECTORY(products)
ADD_SUBDIRECTORY(util)
ADD_SUBDIRECTORY(arch)
antimony/lib/fab/vendor/Eigen/src/Core/DenseBase.h0000644000175000017500000005454613341130426021212 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2007-2010 Benoit Jacob
// Copyright (C) 2008-2010 Gael Guennebaud
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_DENSEBASE_H
#define EIGEN_DENSEBASE_H

namespace Eigen {

namespace internal {

// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
// This dummy function simply aims at checking that at compile time.
static inline void check_DenseIndex_is_signed() {
  EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
}

} // end namespace internal

/** \class DenseBase
  * \ingroup Core_Module
  *
  * \brief Base class for all dense matrices, vectors, and arrays
  *
  * This class is the base that is inherited by all dense objects (matrix, vector, arrays,
  * and related expression types). The common Eigen API for dense objects is contained in this class.
  *
  * \tparam Derived is the derived type, e.g., a matrix type or an expression.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
* * \sa \ref TopicClassHierarchy */ template class DenseBase #ifndef EIGEN_PARSED_BY_DOXYGEN : public internal::special_scalar_op_base::Scalar, typename NumTraits::Scalar>::Real> #else : public DenseCoeffsBase #endif // not EIGEN_PARSED_BY_DOXYGEN { public: using internal::special_scalar_op_base::Scalar, typename NumTraits::Scalar>::Real>::operator*; class InnerIterator; typedef typename internal::traits::StorageKind StorageKind; /** \brief The type of indices * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. * \sa \ref TopicPreprocessorDirectives. */ typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseCoeffsBase Base; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::rowIndexByOuterInner; using Base::colIndexByOuterInner; using Base::coeff; using Base::coeffByOuterInner; using Base::packet; using Base::packetByOuterInner; using Base::writePacket; using Base::writePacketByOuterInner; using Base::coeffRef; using Base::coeffRefByOuterInner; using Base::copyCoeff; using Base::copyCoeffByOuterInner; using Base::copyPacket; using Base::copyPacketByOuterInner; using Base::operator(); using Base::operator[]; using Base::x; using Base::y; using Base::z; using Base::w; using Base::stride; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; typedef typename Base::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, /**< This value is equal to the maximum possible number of rows that this expression * might have. If this expression might have an arbitrarily high number of rows, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime */ MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, /**< This value is equal to the maximum possible number of columns that this expression * might have. If this expression might have an arbitrarily high number of columns, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. 
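 *
 * For instance (an illustrative sketch, not part of the upstream docs), a
 * dynamic-size matrix can still carry fixed maximum sizes:
 * \code
 * Matrix<float, Dynamic, Dynamic, 0, 3, 4> m; // MaxRowsAtCompileTime==3, MaxColsAtCompileTime==4
 * m.resize(2, 3);                             // fits in the 3x4 static storage: no heap allocation
 * \endcode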
* * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime */ MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime>::ret), /**< This value is equal to the maximum possible number of coefficients that this expression * might have. If this expression might have an arbitrarily high number of coefficients, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime */ IsVectorAtCompileTime = internal::traits::MaxRowsAtCompileTime == 1 || internal::traits::MaxColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. */ InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret }; enum { ThisConstantIsPrivateInPlainObjectBase }; /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return size(); } /** \returns true if either the number of rows or the number of columns is equal to 1. * In other words, this function returns * \code rows()==1 || cols()==1 \endcode * \sa rows(), cols(), IsVectorAtCompileTime. */ /** \returns the outer size. * * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a * column-major matrix, and the number of rows for a row-major matrix. */ Index outerSize() const { return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols(); } /** \returns the inner size. * * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a * column-major matrix, and the number of columns for a row-major matrix. */ Index innerSize() const { return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows(); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. 
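 *
 * For example (an illustrative sketch): resizing an expression to its current
 * size compiles and is a no-op, while any other size triggers the assertion:
 * \code
 * VectorXf v(3);
 * (v + v).resize(3); // fine: the size is unchanged
 * \endcode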
*/ void resize(Index newSize) { EIGEN_ONLY_USED_FOR_DEBUG(newSize); eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize."); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ void resize(Index nbRows, Index nbCols) { EIGEN_ONLY_USED_FOR_DEBUG(nbRows); EIGEN_ONLY_USED_FOR_DEBUG(nbCols); eigen_assert(nbRows == this->rows() && nbCols == this->cols() && "DenseBase::resize() does not actually allow to resize."); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,Derived> ConstantReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */ typedef CwiseNullaryOp,Derived> SequentialLinSpacedReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ typedef CwiseNullaryOp,Derived> RandomAccessLinSpacedReturnType; /** \internal the return type of MatrixBase::eigenvalues() */ typedef Matrix::Scalar>::Real, internal::traits::ColsAtCompileTime, 1> EigenvaluesReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** Copies \a other into *this. \returns a reference to *this. */ template Derived& operator=(const DenseBase& other); /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ Derived& operator=(const DenseBase& other); template Derived& operator=(const EigenBase &other); template Derived& operator+=(const EigenBase &other); template Derived& operator-=(const EigenBase &other); template Derived& operator=(const ReturnByValue& func); #ifndef EIGEN_PARSED_BY_DOXYGEN /** Copies \a other into *this without evaluating other. \returns a reference to *this. 
*/ template Derived& lazyAssign(const DenseBase& other); #endif // not EIGEN_PARSED_BY_DOXYGEN CommaInitializer operator<< (const Scalar& s); template const Flagged flagged() const; template CommaInitializer operator<< (const DenseBase& other); Eigen::Transpose transpose(); typedef typename internal::add_const >::type ConstTransposeReturnType; ConstTransposeReturnType transpose() const; void transposeInPlace(); #ifndef EIGEN_NO_DEBUG protected: template void checkTransposeAliasing(const OtherDerived& other) const; public: #endif static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); static const ConstantReturnType Constant(Index size, const Scalar& value); static const ConstantReturnType Constant(const Scalar& value); static const SequentialLinSpacedReturnType LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high); static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low, const Scalar& high); static const SequentialLinSpacedReturnType LinSpaced(Sequential_t, const Scalar& low, const Scalar& high); static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high); template static const CwiseNullaryOp NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); template static const CwiseNullaryOp NullaryExpr(Index size, const CustomNullaryOp& func); template static const CwiseNullaryOp NullaryExpr(const CustomNullaryOp& func); static const ConstantReturnType Zero(Index rows, Index cols); static const ConstantReturnType Zero(Index size); static const ConstantReturnType Zero(); static const ConstantReturnType Ones(Index rows, Index cols); static const ConstantReturnType Ones(Index size); static const ConstantReturnType Ones(); void fill(const Scalar& value); Derived& setConstant(const Scalar& value); Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); Derived& setLinSpaced(const Scalar& low, const Scalar& high); Derived& setZero(); Derived& setOnes(); Derived& setRandom(); template bool isApprox(const DenseBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const; bool isMuchSmallerThan(const RealScalar& other, const RealScalar& prec = NumTraits::dummy_precision()) const; template bool isMuchSmallerThan(const DenseBase& other, const RealScalar& prec = NumTraits::dummy_precision()) const; bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits::dummy_precision()) const; bool isZero(const RealScalar& prec = NumTraits::dummy_precision()) const; bool isOnes(const RealScalar& prec = NumTraits::dummy_precision()) const; inline bool hasNaN() const; inline bool allFinite() const; inline Derived& operator*=(const Scalar& other); inline Derived& operator/=(const Scalar& other); typedef typename internal::add_const_on_value_type::type>::type EvalReturnType; /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. */ EIGEN_STRONG_INLINE EvalReturnType eval() const { // Even though MSVC does not honor strong inlining when the return type // is a dynamic matrix, we desperately need strong inlining for fixed // size types on MSVC. return typename internal::eval::type(derived()); } /** swaps *this with the expression \a other. 
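 *
 * A short illustration (editor's sketch):
 * \code
 * MatrixXf a = MatrixXf::Zero(2,2), b = MatrixXf::Ones(2,2);
 * a.swap(b); // a is now all ones and b all zeros
 * \endcode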
* */ template void swap(const DenseBase& other, int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase) { SwapWrapper(derived()).lazyAssign(other.derived()); } /** swaps *this with the matrix or array \a other. * */ template void swap(PlainObjectBase& other) { SwapWrapper(derived()).lazyAssign(other.derived()); } inline const NestByValue nestByValue() const; inline const ForceAlignedAccess forceAlignedAccess() const; inline ForceAlignedAccess forceAlignedAccess(); template inline const typename internal::conditional,Derived&>::type forceAlignedAccessIf() const; template inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); Scalar sum() const; Scalar mean() const; Scalar trace() const; Scalar prod() const; typename internal::traits::Scalar minCoeff() const; typename internal::traits::Scalar maxCoeff() const; template typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const; template typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const; template typename internal::traits::Scalar minCoeff(IndexType* index) const; template typename internal::traits::Scalar maxCoeff(IndexType* index) const; template typename internal::result_of::Scalar)>::type redux(const BinaryOp& func) const; template void visit(Visitor& func) const; inline const WithFormat format(const IOFormat& fmt) const; /** \returns the unique coefficient of a 1x1 expression */ CoeffReturnType value() const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeff(0,0); } bool all(void) const; bool any(void) const; Index count() const; typedef VectorwiseOp RowwiseReturnType; typedef const VectorwiseOp ConstRowwiseReturnType; typedef VectorwiseOp ColwiseReturnType; typedef const VectorwiseOp ConstColwiseReturnType; ConstRowwiseReturnType rowwise() const; RowwiseReturnType rowwise(); ConstColwiseReturnType colwise() const; ColwiseReturnType colwise(); static const CwiseNullaryOp,Derived> Random(Index rows, Index cols); static const CwiseNullaryOp,Derived> Random(Index size); static const CwiseNullaryOp,Derived> Random(); template const Select select(const DenseBase& thenMatrix, const DenseBase& elseMatrix) const; template inline const Select select(const DenseBase& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const; template inline const Select select(const typename ElseDerived::Scalar& thenScalar, const DenseBase& elseMatrix) const; template RealScalar lpNorm() const; template inline const Replicate replicate() const; typedef Replicate ReplicateReturnType; inline const ReplicateReturnType replicate(Index rowFacor,Index colFactor) const; typedef Reverse ReverseReturnType; typedef const Reverse ConstReverseReturnType; ReverseReturnType reverse(); ConstReverseReturnType reverse() const; void reverseInPlace(); #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase # include "../plugins/BlockMethods.h" # ifdef EIGEN_DENSEBASE_PLUGIN # include EIGEN_DENSEBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #ifdef EIGEN2_SUPPORT Block corner(CornerType type, Index cRows, Index cCols); const Block corner(CornerType type, Index cRows, Index cCols) const; template Block corner(CornerType type); template const Block corner(CornerType type) const; #endif // EIGEN2_SUPPORT // disable the use of evalTo for dense objects with a nice compilation error template inline void evalTo(Dest& ) const { 
EIGEN_STATIC_ASSERT((internal::is_same::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); } protected: /** Default constructor. Do nothing. */ DenseBase() { /* Just checks for self-consistency of the flags. * Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down */ #ifdef EIGEN_INTERNAL_DEBUGGING EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))), INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) #endif } private: explicit DenseBase(int); DenseBase(int,int); template explicit DenseBase(const DenseBase&); }; } // end namespace Eigen #endif // EIGEN_DENSEBASE_H antimony/lib/fab/vendor/Eigen/src/Core/CommaInitializer.h0000644000175000017500000001270613341130426022621 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMMAINITIALIZER_H #define EIGEN_COMMAINITIALIZER_H namespace Eigen { /** \class CommaInitializer * \ingroup Core_Module * * \brief Helper class used by the comma initializer operator * * This class is internally used to implement the comma initializer feature. It is * the return type of MatrixBase::operator<<, and most of the time this is the only * way it is used. * * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() */ template struct CommaInitializer { typedef typename XprType::Scalar Scalar; typedef typename XprType::Index Index; inline CommaInitializer(XprType& xpr, const Scalar& s) : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) { m_xpr.coeffRef(0,0) = s; } template inline CommaInitializer(XprType& xpr, const DenseBase& other) : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) { m_xpr.block(0, 0, other.rows(), other.cols()) = other; } /* Copy/Move constructor which transfers ownership. This is crucial in * absence of return value optimization to avoid assertions during destruction. */ // FIXME in C++11 mode this could be replaced by a proper RValue constructor inline CommaInitializer(const CommaInitializer& o) : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { // Mark original object as finished. 
In absence of R-value references we need to const_cast: const_cast(o).m_row = m_xpr.rows(); const_cast(o).m_col = m_xpr.cols(); const_cast(o).m_currentBlockRows = 0; } /* inserts a scalar value in the target matrix */ CommaInitializer& operator,(const Scalar& s) { if (m_col==m_xpr.cols()) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = 1; eigen_assert(m_row CommaInitializer& operator,(const DenseBase& other) { if(other.cols()==0 || other.rows()==0) return *this; if (m_col==m_xpr.cols()) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = other.rows(); eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } eigen_assert(m_col (m_row, m_col) = other; else m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other; m_col += other.cols(); return *this; } inline ~CommaInitializer() { eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows() && m_col == m_xpr.cols() && "Too few coefficients passed to comma initializer (operator<<)"); } /** \returns the built matrix once all its coefficients have been set. * Calling finished is 100% optional. Its purpose is to write expressions * like this: * \code * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); * \endcode */ inline XprType& finished() { return m_xpr; } XprType& m_xpr; // target expression Index m_row; // current row id Index m_col; // current col id Index m_currentBlockRows; // current block height }; /** \anchor MatrixBaseCommaInitRef * Convenient operator to set the coefficients of a matrix. * * The coefficients must be provided in a row major order and exactly match * the size of the matrix. Otherwise an assertion is raised. * * Example: \include MatrixBase_set.cpp * Output: \verbinclude MatrixBase_set.out * * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order. * * \sa CommaInitializer::finished(), class CommaInitializer */ template inline CommaInitializer DenseBase::operator<< (const Scalar& s) { return CommaInitializer(*static_cast(this), s); } /** \sa operator<<(const Scalar&) */ template template inline CommaInitializer DenseBase::operator<<(const DenseBase& other) { return CommaInitializer(*static_cast(this), other); } } // end namespace Eigen #endif // EIGEN_COMMAINITIALIZER_H antimony/lib/fab/vendor/Eigen/src/Core/PlainObjectBase.h0000644000175000017500000010444213341130426022345 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
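// An editor's illustration (not part of the upstream header) of the resize()
// and conservativeResize() members documented below: they differ in whether
// the old coefficients survive.
//
//   Eigen::MatrixXd m(2,2);
//   m << 1, 2,
//        3, 4;
//   m.conservativeResize(3,3); // the old 2x2 block is kept, new cells are uninitialized
//   m.resize(4,4);             // reallocates: all previous values are lost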
#ifndef EIGEN_DENSESTORAGEBASE_H
#define EIGEN_DENSESTORAGEBASE_H

#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)
# define EIGEN_INITIALIZE_COEFFS
# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)
# define EIGEN_INITIALIZE_COEFFS
# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();
#else
# undef EIGEN_INITIALIZE_COEFFS
# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#endif

namespace Eigen {

namespace internal {

template<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow {
  template<typename Index>
  static EIGEN_ALWAYS_INLINE void run(Index, Index)
  {
  }
};

template<> struct check_rows_cols_for_overflow<Dynamic> {
  template<typename Index>
  static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols)
  {
    // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
    // we assume Index is signed
    Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
    bool error = (rows == 0 || cols == 0) ? false
               : (rows > max_index / cols);
    if (error)
      throw_std_bad_alloc();
  }
};

template<typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)>
struct conservative_resize_like_impl;

template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;

} // end namespace internal

/** \class PlainObjectBase
  * \brief %Dense storage base class for matrices and arrays.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN.
  *
  * \sa \ref TopicClassHierarchy
  */
#ifdef EIGEN_PARSED_BY_DOXYGEN
namespace internal {

// this is a workaround to doxygen not being able to understand the inheritance logic
// when it is hidden by the dense_xpr_base helper struct.
template<typename Derived> struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase<Derived> {};
/** This class is just a workaround for Doxygen and it does not actually exist. */
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct dense_xpr_base_dispatcher_for_doxygen<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
    : public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
/** This class is just a workaround for Doxygen and it does not actually exist.
*/ template struct dense_xpr_base_dispatcher_for_doxygen > : public ArrayBase > {}; } // namespace internal template class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen #else template class PlainObjectBase : public internal::dense_xpr_base::type #endif { public: enum { Options = internal::traits::Options }; typedef typename internal::dense_xpr_base::type Base; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef Derived DenseType; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; template friend class Eigen::Map; friend class Eigen::Map; typedef Eigen::Map MapType; friend class Eigen::Map; typedef const Eigen::Map ConstMapType; friend class Eigen::Map; typedef Eigen::Map AlignedMapType; friend class Eigen::Map; typedef const Eigen::Map ConstAlignedMapType; template struct StridedMapType { typedef Eigen::Map type; }; template struct StridedConstMapType { typedef Eigen::Map type; }; template struct StridedAlignedMapType { typedef Eigen::Map type; }; template struct StridedConstAlignedMapType { typedef Eigen::Map type; }; protected: DenseStorage m_storage; public: enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits::Flags & AlignedBit) != 0 }; EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) Base& base() { return *static_cast(this); } const Base& base() const { return *static_cast(this); } EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); } EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const { return m_storage.data()[index]; } EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId) { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_storage.data()[index]; } EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const { return m_storage.data()[index]; } /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt (m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows())); } /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return internal::ploadt(m_storage.data() + index); } /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) { internal::pstoret (m_storage.data() + (Flags & RowMajorBit ? 
colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()), val); } /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) { internal::pstoret(m_storage.data() + index, val); } /** \returns a const pointer to the data array of this matrix */ EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } /** \returns a pointer to the data array of this matrix */ EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } /** Resizes \c *this to a \a rows x \a cols matrix. * * This method is intended for dynamic-size matrices, although it is legal to call it on any * matrix as long as fixed dimensions are left unchanged. If you only want to change the number * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). * * If the current number of coefficients of \c *this exactly matches the * product \a rows * \a cols, then no memory allocation is performed and * the current values are left unchanged. In all other cases, including * shrinking, the data is reallocated and all previous values are lost. * * Example: \include Matrix_resize_int_int.cpp * Output: \verbinclude Matrix_resize_int_int.out * * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) */ EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols) { eigen_assert( EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime) && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime) && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array."); internal::check_rows_cols_for_overflow::run(nbRows, nbCols); #ifdef EIGEN_INITIALIZE_COEFFS Index size = nbRows*nbCols; bool size_changed = size != this->size(); m_storage.resize(size, nbRows, nbCols); if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #else internal::check_rows_cols_for_overflow::run(nbRows, nbCols); m_storage.resize(nbRows*nbCols, nbRows, nbCols); #endif } /** Resizes \c *this to a vector of length \a size * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix. * * Example: \include Matrix_resize_int.cpp * Output: \verbinclude Matrix_resize_int.out * * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) */ inline void resize(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0); #ifdef EIGEN_INITIALIZE_COEFFS bool size_changed = size != this->size(); #endif if(RowsAtCompileTime == 1) m_storage.resize(size, 1, size); else m_storage.resize(size, size, 1); #ifdef EIGEN_INITIALIZE_COEFFS if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif } /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. 
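 * (An illustrative sketch: \c mat.resize(NoChange, 5); keeps the current
 * number of rows and sets the number of columns to 5.)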
* * Example: \include Matrix_resize_NoChange_int.cpp * Output: \verbinclude Matrix_resize_NoChange_int.out * * \sa resize(Index,Index) */ inline void resize(NoChange_t, Index nbCols) { resize(rows(), nbCols); } /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. * * Example: \include Matrix_resize_int_NoChange.cpp * Output: \verbinclude Matrix_resize_int_NoChange.out * * \sa resize(Index,Index) */ inline void resize(Index nbRows, NoChange_t) { resize(nbRows, cols()); } /** Resizes \c *this to have the same dimensions as \a other. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) { const OtherDerived& other = _other.derived(); internal::check_rows_cols_for_overflow::run(other.rows(), other.cols()); const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(1, othersize); } else if(ColsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(othersize, 1); } else resize(other.rows(), other.cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * The method is intended for matrices of dynamic size. If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols) { internal::conservative_resize_like_impl::run(*this, nbRows, nbCols); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of columns unchanged. * * In case the matrix is growing, new rows will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(nbRows, cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of rows unchanged. * * In case the matrix is growing, new columns will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows(), nbCols); } /** Resizes the vector to \a size while retaining old values. * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix. * * When values are appended, they will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index size) { internal::conservative_resize_like_impl::run(*this, size); } /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. * * The method is intended for matrices of dynamic size. 
If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will copied from \c other. */ template EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase& other) { internal::conservative_resize_like_impl::run(*this, other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) { return _set(other); } /** \sa MatrixBase::lazyAssign() */ template EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase& other) { _resize_to_match(other); return Base::lazyAssign(other.derived()); } template EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue& func) { resize(func.rows(), func.cols()); return Base::operator=(func); } EIGEN_STRONG_INLINE PlainObjectBase() : m_storage() { // _check_template_params(); // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ? /** \internal */ PlainObjectBase(internal::constructor_without_unaligned_array_assert) : m_storage(internal::constructor_without_unaligned_array_assert()) { // _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols) : m_storage(a_size, nbRows, nbCols) { // _check_template_params(); // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } /** \copydoc MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE Derived& operator=(const EigenBase &other) { _resize_to_match(other); Base::operator=(other.derived()); return this->derived(); } /** \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { _check_template_params(); internal::check_rows_cols_for_overflow::run(other.derived().rows(), other.derived().cols()); Base::operator=(other.derived()); } /** \name Map * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned * \a data pointers. 
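 *
 * An illustrative sketch (not from the upstream docs):
 * \code
 * float data[6] = {1, 2, 3, 4, 5, 6};
 * Matrix<float, 2, 3> m = Matrix<float, 2, 3>::Map(data); // copy from existing storage
 * \endcode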
* * \see class Map */ //@{ static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); } static inline MapType Map(Scalar* data) { return MapType(data); } static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); } static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); } static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); } static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); } static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); } static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) { return ConstAlignedMapType(data, size); } static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { return ConstAlignedMapType(data, rows, cols); } static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) { return typename StridedConstMapType >::type(data, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, const Stride& stride) { return typename StridedMapType >::type(data, stride); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstMapType >::type(data, size, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, Index size, const Stride& stride) { return typename StridedMapType >::type(data, size, stride); } template static inline typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstMapType >::type(data, rows, cols, stride); } template static inline typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedMapType >::type(data, rows, cols, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) { return typename StridedAlignedMapType >::type(data, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, size, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) { return typename StridedAlignedMapType >::type(data, size, stride); } template static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, rows, cols, stride); } template static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedAlignedMapType >::type(data, rows, cols, stride); } //@} using 
Base::setConstant; Derived& setConstant(Index size, const Scalar& value); Derived& setConstant(Index rows, Index cols, const Scalar& value); using Base::setZero; Derived& setZero(Index size); Derived& setZero(Index rows, Index cols); using Base::setOnes; Derived& setOnes(Index size); Derived& setOnes(Index rows, Index cols); using Base::setRandom; Derived& setRandom(Index size); Derived& setRandom(Index rows, Index cols); #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN #include EIGEN_PLAINOBJECTBASE_PLUGIN #endif protected: /** \internal Resizes *this in preparation for assigning \a other to it. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase& other) { #ifdef EIGEN_NO_AUTOMATIC_RESIZING eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size()) : (rows() == other.rows() && cols() == other.cols()))) && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); EIGEN_ONLY_USED_FOR_DEBUG(other); #else resizeLike(other); #endif } /** * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. * * \sa operator=(const MatrixBase&), _set_noalias() * * \internal */ template EIGEN_STRONG_INLINE Derived& _set(const DenseBase& other) { _set_selector(other.derived(), typename internal::conditional(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type()); return this->derived(); } template EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); } template EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); } /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. * * \sa operator=(const MatrixBase&), _set() */ template EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase& other) { // I don't think we need this resize call since the lazyAssign will anyways resize // and lazyAssign will be called by the assign selector. //_resize_to_match(other); // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because // it wouldn't allow to copy a row-vector into a column-vector. 
return internal::assign_selector::run(this->derived(), other.derived()); } template EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT(bool(NumTraits::IsInteger) && bool(NumTraits::IsInteger), FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) resize(nbRows,nbCols); } template EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = val0; m_storage.data()[1] = val1; } template friend struct internal::matrix_swap_impl; /** \internal generic implementation of swap for dense storage since for dynamic-sized matrices of same type it is enough to swap the * data pointers. */ template void _swap(DenseBase const & other) { enum { SwapPointers = internal::is_same::value && Base::SizeAtCompileTime==Dynamic }; internal::matrix_swap_impl::run(this->derived(), other.const_cast_derived()); } public: #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0) && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0)) && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0)) && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0)) && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0)) && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic) && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic) && (Options & (DontAlign|RowMajor)) == Options), INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif private: enum { ThisConstantIsPrivateInPlainObjectBase }; }; namespace internal { template struct conservative_resize_like_impl { typedef typename Derived::Index Index; static void run(DenseBase& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns { internal::check_rows_cols_for_overflow::run(rows, cols); _this.derived().m_storage.conservativeResize(rows*cols,rows,cols); } else { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(rows,cols); const Index common_rows = (std::min)(rows, _this.rows()); const Index common_cols = (std::min)(cols, _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } static void run(DenseBase& _this, const DenseBase& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. 
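    // The static asserts below reject fully fixed-size types, for which a
    // conservative resize could never change the dimensions anyway.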
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns { const Index new_rows = other.rows() - _this.rows(); const Index new_cols = other.cols() - _this.cols(); _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); if (new_rows>0) _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); else if (new_cols>0) _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols); } else { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(other); const Index common_rows = (std::min)(tmp.rows(), _this.rows()); const Index common_cols = (std::min)(tmp.cols(), _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } }; // Here, the specialization for vectors inherits from the general matrix case // to allow calling .conservativeResize(rows,cols) on vectors. template struct conservative_resize_like_impl : conservative_resize_like_impl { using conservative_resize_like_impl::run; typedef typename Derived::Index Index; static void run(DenseBase& _this, Index size) { const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1; _this.derived().m_storage.conservativeResize(size,new_rows,new_cols); } static void run(DenseBase& _this, const DenseBase& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; const Index num_new_elements = other.size() - _this.size(); const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); if (num_new_elements > 0) _this.tail(num_new_elements) = other.tail(num_new_elements); } }; template struct matrix_swap_impl { static inline void run(MatrixTypeA& a, MatrixTypeB& b) { a.base().swap(b); } }; template struct matrix_swap_impl { static inline void run(MatrixTypeA& a, MatrixTypeB& b) { static_cast(a).m_storage.swap(static_cast(b).m_storage); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_DENSESTORAGEBASE_H antimony/lib/fab/vendor/Eigen/src/Core/Transpositions.h0000644000175000017500000003577513341130426022433 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010-2011 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSPOSITIONS_H #define EIGEN_TRANSPOSITIONS_H namespace Eigen { /** \class Transpositions * \ingroup Core_Module * * \brief Represents a sequence of transpositions (row/column interchange) * * \param SizeAtCompileTime the number of transpositions, or Dynamic * \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. * * This class represents a permutation transformation as a sequence of \em n transpositions * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. 
It is internally stored as a vector of integers \c indices. * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges * the rows \c i and \c indices[i] of the matrix \c M. * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange. * * Compared to the class PermutationMatrix, such a sequence of transpositions is what is * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place. * * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example: * \code * Transpositions tr; * MatrixXf mat; * mat = tr * mat; * \endcode * In this example, we detect that the matrix appears on both side, and so the transpositions * are applied in-place without any temporary or extra copy. * * \sa class PermutationMatrix */ namespace internal { template struct transposition_matrix_product_retval; } template class TranspositionsBase { typedef internal::traits Traits; public: typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } /** Copies the \a other transpositions into \c *this */ template Derived& operator=(const TranspositionsBase& other) { indices() = other.indices(); return derived(); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Derived& operator=(const TranspositionsBase& other) { indices() = other.indices(); return derived(); } #endif /** \returns the number of transpositions */ inline Index size() const { return indices().size(); } /** Direct access to the underlying index vector */ inline const Index& coeff(Index i) const { return indices().coeff(i); } /** Direct access to the underlying index vector */ inline Index& coeffRef(Index i) { return indices().coeffRef(i); } /** Direct access to the underlying index vector */ inline const Index& operator()(Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ inline Index& operator()(Index i) { return indices()(i); } /** Direct access to the underlying index vector */ inline const Index& operator[](Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ inline Index& operator[](Index i) { return indices()(i); } /** const version of indices(). */ const IndicesType& indices() const { return derived().indices(); } /** \returns a reference to the stored array representing the transpositions. */ IndicesType& indices() { return derived().indices(); } /** Resizes to given size. */ inline void resize(int newSize) { indices().resize(newSize); } /** Sets \c *this to represents an identity transformation */ void setIdentity() { for(int i = 0; i < indices().size(); ++i) coeffRef(i) = i; } // FIXME: do we want such methods ? // might be usefull when the target matrix expression is complex, e.g.: // object.matrix().block(..,..,..,..) 
= trans * object.matrix().block(..,..,..,..); /* template void applyForwardToRows(MatrixType& mat) const { for(Index k=0 ; k void applyBackwardToRows(MatrixType& mat) const { for(Index k=size()-1 ; k>=0 ; --k) if(m_indices(k)!=k) mat.row(k).swap(mat.row(m_indices(k))); } */ /** \returns the inverse transformation */ inline Transpose inverse() const { return Transpose(derived()); } /** \returns the transpose transformation */ inline Transpose transpose() const { return Transpose(derived()); } protected: }; namespace internal { template struct traits > { typedef IndexType Index; typedef Matrix IndicesType; }; } template class Transpositions : public TranspositionsBase > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; inline Transpositions() {} /** Copy constructor. */ template inline Transpositions(const TranspositionsBase& other) : m_indices(other.indices()) {} #ifndef EIGEN_PARSED_BY_DOXYGEN /** Standard copy constructor. Defined only to prevent a default copy constructor * from hiding the other templated constructor */ inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {} #endif /** Generic constructor from expression of the transposition indices. */ template explicit inline Transpositions(const MatrixBase& a_indices) : m_indices(a_indices) {} /** Copies the \a other transpositions into \c *this */ template Transpositions& operator=(const TranspositionsBase& other) { return Base::operator=(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Transpositions& operator=(const Transpositions& other) { m_indices = other.m_indices; return *this; } #endif /** Constructs an uninitialized sequence of transpositions of the given size. */ inline Transpositions(Index size) : m_indices(size) {} /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the transpositions. */ IndicesType& indices() { return m_indices; } protected: IndicesType m_indices; }; namespace internal { template struct traits,_PacketAccess> > { typedef IndexType Index; typedef Map, _PacketAccess> IndicesType; }; } template class Map,PacketAccess> : public TranspositionsBase,PacketAccess> > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; inline Map(const Index* indicesPtr) : m_indices(indicesPtr) {} inline Map(const Index* indicesPtr, Index size) : m_indices(indicesPtr,size) {} /** Copies the \a other transpositions into \c *this */ template Map& operator=(const TranspositionsBase& other) { return Base::operator=(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Map& operator=(const Map& other) { m_indices = other.m_indices; return *this; } #endif /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the transpositions.
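*
* For illustration only (a sketch, not from the original source; it assumes the
* default integer index type of this Transpositions instantiation): an external
* index buffer can be wrapped and edited in place,
* \code
* int idx[3] = {1, 2, 2};
* Map<Transpositions<3> > tr(idx);  // wraps idx without copying
* tr.indices()(0) = 0;              // writes straight through to idx[0]
* \endcode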
*/ IndicesType& indices() { return m_indices; } protected: IndicesType m_indices; }; namespace internal { template struct traits > { typedef typename _IndicesType::Scalar Index; typedef _IndicesType IndicesType; }; } template class TranspositionsWrapper : public TranspositionsBase > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar Index; inline TranspositionsWrapper(IndicesType& a_indices) : m_indices(a_indices) {} /** Copies the \a other transpositions into \c *this */ template TranspositionsWrapper& operator=(const TranspositionsBase& other) { return Base::operator=(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ TranspositionsWrapper& operator=(const TranspositionsWrapper& other) { m_indices = other.m_indices; return *this; } #endif /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the transpositions. */ IndicesType& indices() { return m_indices; } protected: const typename IndicesType::Nested m_indices; }; /** \returns the \a matrix with the \a transpositions applied to the columns. */ template inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix, const TranspositionsBase &transpositions) { return internal::transposition_matrix_product_retval (transpositions.derived(), matrix.derived()); } /** \returns the \a matrix with the \a transpositions applied to the rows. */ template inline const internal::transposition_matrix_product_retval operator*(const TranspositionsBase &transpositions, const MatrixBase& matrix) { return internal::transposition_matrix_product_retval (transpositions.derived(), matrix.derived()); } namespace internal { template struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template struct transposition_matrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename TranspositionType::Index Index; transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix) : m_transpositions(tr), m_matrix(matrix) {} inline int rows() const { return m_matrix.rows(); } inline int cols() const { return m_matrix.cols(); } template inline void evalTo(Dest& dst) const { const int size = m_transpositions.size(); Index j = 0; if(!(is_same::value && extract_data(dst) == extract_data(m_matrix))) dst = m_matrix; for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k class Transpose > { typedef TranspositionsDerived TranspositionType; typedef typename TranspositionType::IndicesType IndicesType; public: Transpose(const TranspositionType& t) : m_transpositions(t) {} inline int size() const { return m_transpositions.size(); } /** \returns the \a matrix with the inverse transpositions applied to the columns. */ template friend inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix, const Transpose& trt) { return internal::transposition_matrix_product_retval(trt.m_transpositions, matrix.derived()); } /** \returns the \a matrix with the inverse transpositions applied to the rows. 
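*
* A small usage sketch (illustrative, not from the original source):
* \code
* Transpositions<Dynamic> tr;   // e.g. filled in by a pivoting decomposition
* MatrixXf A;
* A = tr * A;                   // apply the transpositions to the rows of A
* A = tr.inverse() * A;         // undo them again
* \endcode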
*/ template inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix) const { return internal::transposition_matrix_product_retval(m_transpositions, matrix.derived()); } protected: const TranspositionType& m_transpositions; }; } // end namespace Eigen #endif // EIGEN_TRANSPOSITIONS_H antimony/lib/fab/vendor/Eigen/src/Core/DenseStorage.h0000644000175000017500000003475113341130426021750 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // Copyright (C) 2006-2009 Benoit Jacob // Copyright (C) 2010 Hauke Heibel // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIXSTORAGE_H #define EIGEN_MATRIXSTORAGE_H #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN; #else #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN #endif namespace Eigen { namespace internal { struct constructor_without_unaligned_array_assert {}; template void check_static_allocation_size() { // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit #if EIGEN_STACK_ALLOCATION_LIMIT EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); #endif } /** \internal * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: * to a 16-byte boundary if the total size is a multiple of 16 bytes. */ template struct plain_array { T array[Size]; plain_array() { check_static_allocation_size(); } plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) #elif EIGEN_GNUC_AT_LEAST(4,7) // GCC 4.7 is too aggressive in its optimizations and removes the alignment test based on the fact that the array is declared to be aligned. // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900 // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined: template EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; } #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ eigen_assert((reinterpret_cast(eigen_unaligned_array_assert_workaround_gcc47(array)) & sizemask) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!! ****"); #else #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ eigen_assert((reinterpret_cast(array) & sizemask) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!!
****"); #endif template struct plain_array { EIGEN_USER_ALIGN16 T array[Size]; plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf); check_static_allocation_size(); } plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size(); } }; template struct plain_array { EIGEN_USER_ALIGN16 T array[1]; plain_array() {} plain_array(constructor_without_unaligned_array_assert) {} }; } // end namespace internal /** \internal * * \class DenseStorage * \ingroup Core_Module * * \brief Stores the data of a matrix * * This class stores the data of fixed-size, dynamic-size or mixed matrices * in as compact a way as possible. * * \sa Matrix */ template class DenseStorage; // purely fixed-size matrix template class DenseStorage { internal::plain_array m_data; public: inline DenseStorage() {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()) {} inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } static inline DenseIndex rows(void) {return _Rows;} static inline DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} inline void resize(DenseIndex,DenseIndex,DenseIndex) {} inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; // null matrix template class DenseStorage { public: inline DenseStorage() {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) {} inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} inline void swap(DenseStorage& ) {} static inline DenseIndex rows(void) {return _Rows;} static inline DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} inline void resize(DenseIndex,DenseIndex,DenseIndex) {} inline const T *data() const { return 0; } inline T *data() { return 0; } }; // more specializations for null matrices; these are necessary to resolve ambiguities template class DenseStorage : public DenseStorage { }; template class DenseStorage : public DenseStorage { }; template class DenseStorage : public DenseStorage { }; // dynamic-size matrix with fixed-size storage template class DenseStorage { internal::plain_array m_data; DenseIndex m_rows; DenseIndex m_cols; public: inline DenseStorage() : m_rows(0), m_cols(0) {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {} inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } inline DenseIndex rows() const {return m_rows;} inline DenseIndex cols() const {return m_cols;} inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; } inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; } inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed width template class DenseStorage { internal::plain_array m_data; DenseIndex m_rows; public: inline DenseStorage() : m_rows(0) {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) :
m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {} inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } inline DenseIndex rows(void) const {return m_rows;} inline DenseIndex cols(void) const {return _Cols;} inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; } inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; } inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed height template class DenseStorage { internal::plain_array m_data; DenseIndex m_cols; public: inline DenseStorage() : m_cols(0) {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {} inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } inline DenseIndex rows(void) const {return _Rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; } inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; } inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; // purely dynamic matrix. template class DenseStorage { T *m_data; DenseIndex m_rows; DenseIndex m_cols; public: inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0), m_cols(0) {} inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } inline DenseIndex rows(void) const {return m_rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); m_rows = nbRows; m_cols = nbCols; } void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) { if(size != m_rows*m_cols) { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); if (size) m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } m_rows = nbRows; m_cols = nbCols; } inline const T *data() const { return m_data; } inline T *data() { return m_data; } }; // matrix with dynamic width and fixed height (so that matrix has dynamic size). 
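// For illustration (a sketch, not part of the original source): a type such as
// Matrix<float,3,Dynamic> lands on this kind of specialization -- the row count is a
// compile-time constant, so only the heap pointer and the column count are stored:
//   Matrix<float,3,Dynamic> m(3, 5);  // allocates 15 floats; rows() is the constant 3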
template class DenseStorage { T *m_data; DenseIndex m_cols; public: inline DenseStorage() : m_data(0), m_cols(0) {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } static inline DenseIndex rows(void) {return _Rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); m_cols = nbCols; } EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols) { if(size != _Rows*m_cols) { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); if (size) m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } m_cols = nbCols; } inline const T *data() const { return m_data; } inline T *data() { return m_data; } }; // matrix with dynamic height and fixed width (so that matrix has dynamic size). template class DenseStorage { T *m_data; DenseIndex m_rows; public: inline DenseStorage() : m_data(0), m_rows(0) {} inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } inline DenseIndex rows(void) const {return m_rows;} static inline DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); m_rows = nbRows; } EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex) { if(size != m_rows*_Cols) { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); if (size) m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } m_rows = nbRows; } inline const T *data() const { return m_data; } inline T *data() { return m_data; } }; } // end namespace Eigen #endif // EIGEN_MATRIX_H antimony/lib/fab/vendor/Eigen/src/Core/Replicate.h0000644000175000017500000001552713341130426021275 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REPLICATE_H #define EIGEN_REPLICATE_H namespace Eigen { /** * \class Replicate * \ingroup Core_Module * * \brief Expression of the multiple replication of a matrix or vector * * \param MatrixType the type of the object we are replicating * * This class represents an expression of the multiple replication of a matrix or vector. 
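* For example (a small sketch):
* \code
* RowVector2i v; v << 1, 2;
* MatrixXi tiled = v.replicate(2, 2);  // 2x4 matrix: each row reads 1 2 1 2
* \endcode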
* It is the return type of DenseBase::replicate() and most of the time * this is the only way it is used. * * \sa DenseBase::replicate() */ namespace internal { template struct traits > : traits { typedef typename MatrixType::Scalar Scalar; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; enum { Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor }; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; enum { RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic ? Dynamic : RowFactor * MatrixType::RowsAtCompileTime, ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic ? Dynamic : ColFactor * MatrixType::ColsAtCompileTime, //FIXME we don't propagate the max sizes !!! MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1 : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0 : (MatrixType::Flags & RowMajorBit) ? 1 : 0, Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0), CoeffReadCost = _MatrixTypeNested::CoeffReadCost }; }; } template class Replicate : public internal::dense_xpr_base< Replicate >::type { typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; public: typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) template inline explicit Replicate(const OriginalMatrixType& a_matrix) : m_matrix(a_matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); } template inline Replicate(const OriginalMatrixType& a_matrix, Index rowFactor, Index colFactor) : m_matrix(a_matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) } inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } inline Scalar coeff(Index rowId, Index colId) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? rowId : rowId%m_matrix.rows(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? colId : colId%m_matrix.cols(); return m_matrix.coeff(actual_row, actual_col); } template inline PacketScalar packet(Index rowId, Index colId) const { const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? rowId : rowId%m_matrix.rows(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? 
colId : colId%m_matrix.cols(); return m_matrix.template packet(actual_row, actual_col); } const _MatrixTypeNested& nestedExpression() const { return m_matrix; } protected: MatrixTypeNested m_matrix; const internal::variable_if_dynamic m_rowFactor; const internal::variable_if_dynamic m_colFactor; }; /** * \return an expression of the replication of \c *this * * Example: \include MatrixBase_replicate.cpp * Output: \verbinclude MatrixBase_replicate.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate */ template template const Replicate DenseBase::replicate() const { return Replicate(derived()); } /** * \return an expression of the replication of \c *this * * Example: \include MatrixBase_replicate_int_int.cpp * Output: \verbinclude MatrixBase_replicate_int_int.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate */ template const typename DenseBase::ReplicateReturnType DenseBase::replicate(Index rowFactor,Index colFactor) const { return Replicate(derived(),rowFactor,colFactor); } /** * \return an expression of the replication of each column (or row) of \c *this * * Example: \include DirectionWise_replicate_int.cpp * Output: \verbinclude DirectionWise_replicate_int.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate */ template const typename VectorwiseOp::ReplicateReturnType VectorwiseOp::replicate(Index factor) const { return typename VectorwiseOp::ReplicateReturnType (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); } } // end namespace Eigen #endif // EIGEN_REPLICATE_H antimony/lib/fab/vendor/Eigen/src/Core/MathFunctions.h0000644000175000017500000005266413341130426022150 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H namespace Eigen { namespace internal { /** \internal \struct global_math_functions_filtering_base * * What it does: * Defines a typedef 'type' as follows: * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then * global_math_functions_filtering_base::type is a typedef for it. * - otherwise, global_math_functions_filtering_base::type is a typedef for T. * * How it's used: * To allow defining the global math functions (like sin...) in certain cases, like the Array expressions. * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all you want to know * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase. * So we must make sure to use sin_impl > and not sin_impl, otherwise our partial specialization * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it. * * How it's implemented: * SFINAE in the style of enable_if. Highly susceptible to breaking compilers. With GCC, it sure does work, but if you replace * the typename dummy by an integer template parameter, it doesn't work anymore!
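*
* A stripped-down sketch of the mechanism (illustrative, not the exact Eigen code):
* \code
* template<typename T, typename dummy = void>
* struct filtering_base { typedef T type; };          // generic fallback
* template<typename T>
* struct filtering_base<T, typename always_void<typename T::Base>::type>
* { typedef typename T::Base type; };                 // chosen only when T::Base exists
* \endcode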
*/ template struct global_math_functions_filtering_base { typedef T type; }; template struct always_void { typedef void type; }; template struct global_math_functions_filtering_base ::type > { typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; }; #define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl::type> #define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval::type>::type /**************************************************************************** * Implementation of real * ****************************************************************************/ template::IsComplex> struct real_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { return x; } }; template struct real_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { using std::real; return real(x); } }; template struct real_impl : real_default_impl {}; template struct real_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of imag * ****************************************************************************/ template::IsComplex> struct imag_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar&) { return RealScalar(0); } }; template struct imag_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { using std::imag; return imag(x); } }; template struct imag_impl : imag_default_impl {}; template struct imag_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of real_ref * ****************************************************************************/ template struct real_ref_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[0]; } static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast(&x)[0]; } }; template struct real_ref_retval { typedef typename NumTraits::Real & type; }; /**************************************************************************** * Implementation of imag_ref * ****************************************************************************/ template struct imag_ref_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar& run(Scalar& x) { return reinterpret_cast(&x)[1]; } static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast(&x)[1]; } }; template struct imag_ref_default_impl { static inline Scalar run(Scalar&) { return Scalar(0); } static inline const Scalar run(const Scalar&) { return Scalar(0); } }; template struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; template struct imag_ref_retval { typedef typename NumTraits::Real & type; }; /**************************************************************************** * Implementation of conj * ****************************************************************************/ template::IsComplex> struct conj_impl { static inline Scalar run(const Scalar& x) { return x; } }; template struct conj_impl { static inline Scalar run(const Scalar& x) { using std::conj; return conj(x); } }; template struct conj_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of abs2 * 
****************************************************************************/ template struct abs2_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { return x*x; } }; template struct abs2_impl > { static inline RealScalar run(const std::complex& x) { return real(x)*real(x) + imag(x)*imag(x); } }; template struct abs2_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of norm1 * ****************************************************************************/ template struct norm1_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { using std::abs; return abs(real(x)) + abs(imag(x)); } }; template struct norm1_default_impl { static inline Scalar run(const Scalar& x) { using std::abs; return abs(x); } }; template struct norm1_impl : norm1_default_impl::IsComplex> {}; template struct norm1_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of hypot * ****************************************************************************/ template struct hypot_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x, const Scalar& y) { using std::max; using std::min; using std::abs; using std::sqrt; RealScalar _x = abs(x); RealScalar _y = abs(y); RealScalar p = (max)(_x, _y); if(p==RealScalar(0)) return 0; RealScalar q = (min)(_x, _y); RealScalar qp = q/p; return p * sqrt(RealScalar(1) + qp*qp); } }; template struct hypot_retval { typedef typename NumTraits::Real type; }; /**************************************************************************** * Implementation of cast * ****************************************************************************/ template struct cast_impl { static inline NewType run(const OldType& x) { return static_cast(x); } }; // here, for once, we're plainly returning NewType: we don't want cast to do weird things. 
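// For illustration (not part of the original source):
//   double d = cast<int,double>(7);  // ends up as plain static_cast<double>(7)
// while user-defined scalar types can specialize cast_impl<OldType,NewType> whenever
// a plain static_cast is not the right conversion.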
template inline NewType cast(const OldType& x) { return cast_impl::run(x); } /**************************************************************************** * Implementation of atanh2 * ****************************************************************************/ template struct atanh2_default_impl { typedef Scalar retval; typedef typename NumTraits::Real RealScalar; static inline Scalar run(const Scalar& x, const Scalar& y) { using std::abs; using std::log; using std::sqrt; Scalar z = x / y; if (y == Scalar(0) || abs(z) > sqrt(NumTraits::epsilon())) return RealScalar(0.5) * log((y + x) / (y - x)); else return z + z*z*z / RealScalar(3); } }; template struct atanh2_default_impl { static inline Scalar run(const Scalar&, const Scalar&) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) return Scalar(0); } }; template struct atanh2_impl : atanh2_default_impl::IsInteger> {}; template struct atanh2_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of pow * ****************************************************************************/ template struct pow_default_impl { typedef Scalar retval; static inline Scalar run(const Scalar& x, const Scalar& y) { using std::pow; return pow(x, y); } }; template struct pow_default_impl { static inline Scalar run(Scalar x, Scalar y) { Scalar res(1); eigen_assert(!NumTraits::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; while(y) { x *= x; if(y&1) res *= x; y >>= 1; } return res; } }; template struct pow_impl : pow_default_impl::IsInteger> {}; template struct pow_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of random * ****************************************************************************/ template struct random_default_impl {}; template struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; template struct random_retval { typedef Scalar type; }; template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); } static inline Scalar run() { return run(Scalar(NumTraits::IsSigned ? -1 : 0), Scalar(1)); } }; enum { floor_log2_terminate, floor_log2_move_up, floor_log2_move_down, floor_log2_bogus }; template struct floor_log2_selector { enum { middle = (lower + upper) / 2, value = (upper <= lower + 1) ? int(floor_log2_terminate) : (n < (1 << middle)) ? int(floor_log2_move_down) : (n==0) ? int(floor_log2_bogus) : int(floor_log2_move_up) }; }; template::value> struct floor_log2 {}; template struct floor_log2 { enum { value = floor_log2::middle>::value }; }; template struct floor_log2 { enum { value = floor_log2::middle, upper>::value }; }; template struct floor_log2 { enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower }; }; template struct floor_log2 { // no value, error at compile time }; template struct random_default_impl { typedef typename NumTraits::NonInteger NonInteger; static inline Scalar run(const Scalar& x, const Scalar& y) { return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1))); } static inline Scalar run() { #ifdef EIGEN_MAKING_DOCS return run(Scalar(NumTraits::IsSigned ? 
-10 : 0), Scalar(10)); #else enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value, scalar_bits = sizeof(Scalar) * CHAR_BIT, shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)), offset = NumTraits::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0 }; return Scalar((std::rand() >> shift) - offset); #endif } }; template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { return Scalar(random(real(x), real(y)), random(imag(x), imag(y))); } static inline Scalar run() { typedef typename NumTraits::Real RealScalar; return Scalar(random(), random()); } }; template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); } } // end namespace internal /**************************************************************************** * Generic math function * ****************************************************************************/ namespace numext { template inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); } template inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) { return internal::real_ref_impl::run(x); } template inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); } template inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) { return internal::imag_ref_impl::run(x); } template inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); } template inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y); } template inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y); } // std::isfinite is non standard, so let's define our own version, // even though it is not very efficient. 
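// A sketch of why the bound test below works for IEEE floating point (illustrative):
//   x < highest()  is false for +infinity and for NaN (NaN compares false to everything),
//   x > lowest()   is false for -infinity and for NaN,
// so exactly the finite values pass both strict comparisons.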
template bool (isfinite)(const T& x) { return x::highest() && x>NumTraits::lowest(); } } // end namespace numext namespace internal { /**************************************************************************** * Implementation of fuzzy comparisons * ****************************************************************************/ template struct scalar_fuzzy_default_impl {}; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { using std::abs; return abs(x) <= abs(y) * prec; } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { using std::min; using std::abs; return abs(x - y) <= (min)(abs(x), abs(y)) * prec; } static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) { return x <= y || isApprox(x, y, prec); } }; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) { return x == Scalar(0); } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) { return x == y; } static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) { return x <= y; } }; template struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { return numext::abs2(x) <= numext::abs2(y) * prec * prec; } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { using std::min; return numext::abs2(x - y) <= (min)(numext::abs2(x), numext::abs2(y)) * prec * prec; } }; template struct scalar_fuzzy_impl : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; template inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); } template inline bool isApprox(const Scalar& x, const Scalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApprox(x, y, precision); } template inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); } /****************************************** *** The special case of the bool type *** ******************************************/ template<> struct random_impl { static inline bool run() { return random(0,1)==0 ? false : true; } }; template<> struct scalar_fuzzy_impl { typedef bool RealScalar; template static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) { return !x; } static inline bool isApprox(bool x, bool y, bool) { return x == y; } static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) { return (!x) || y; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATHFUNCTIONS_H antimony/lib/fab/vendor/Eigen/src/Core/VectorBlock.h0000644000175000017500000000653113341130426021575 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// // Copyright (C) 2008-2010 Gael Guennebaud // Copyright (C) 2006-2008 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_VECTORBLOCK_H #define EIGEN_VECTORBLOCK_H namespace Eigen { /** \class VectorBlock * \ingroup Core_Module * * \brief Expression of a fixed-size or dynamic-size sub-vector * * \param VectorType the type of the object in which we are taking a sub-vector * \param Size size of the sub-vector we are taking at compile time (optional) * * This class represents an expression of either a fixed-size or dynamic-size sub-vector. * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment(Index) and * most of the time this is the only way it is used. * * However, if you want to directly manipulate sub-vector expressions, * for instance if you want to write a function returning such an expression, you * will need to use this class. * * Here is an example illustrating the dynamic case: * \include class_VectorBlock.cpp * Output: \verbinclude class_VectorBlock.out * * \note Even though this expression has dynamic size, in the case where \a VectorType * has fixed size, this expression inherits a fixed maximal size which means that evaluating * it does not cause a dynamic memory allocation. * * Here is an example illustrating the fixed-size case: * \include class_FixedVectorBlock.cpp * Output: \verbinclude class_FixedVectorBlock.out * * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index) */ namespace internal { template struct traits > : public traits::Flags & RowMajorBit ? 1 : Size, traits::Flags & RowMajorBit ? Size : 1> > { }; } template class VectorBlock : public Block::Flags & RowMajorBit ? 1 : Size, internal::traits::Flags & RowMajorBit ? Size : 1> { typedef Block::Flags & RowMajorBit ? 1 : Size, internal::traits::Flags & RowMajorBit ? Size : 1> Base; enum { IsColVector = !(internal::traits::Flags & RowMajorBit) }; public: EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock) using Base::operator=; /** Dynamic-size constructor */ inline VectorBlock(VectorType& vector, Index start, Index size) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start, IsColVector ? size : 1, IsColVector ? 1 : size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); } /** Fixed-size constructor */ inline VectorBlock(VectorType& vector, Index start) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); } }; } // end namespace Eigen #endif // EIGEN_VECTORBLOCK_H antimony/lib/fab/vendor/Eigen/src/Core/Map.h0000644000175000017500000001773213341130426020102 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MAP_H #define EIGEN_MAP_H namespace Eigen { /** \class Map * \ingroup Core_Module * * \brief A matrix or vector expression mapping an existing array of data. * * \tparam PlainObjectType the equivalent matrix type of the mapped data * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned. * The default is \c #Unaligned.
* \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout * of an ordinary, contiguous array. This can be overridden by specifying strides. * The type passed here must be a specialization of the Stride template, see examples below. * * This class represents a matrix or vector expression mapping an existing array of data. * It can be used to let Eigen interface without any overhead with non-Eigen data structures, * such as plain C arrays or structures from other libraries. By default, it assumes that the * data is laid out contiguously in memory. You can however override this by explicitly specifying * inner and outer strides. * * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: * \include Map_simple.cpp * Output: \verbinclude Map_simple.out * * If you need to map non-contiguous arrays, you can do so by specifying strides: * * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time * fixed value. * \include Map_inner_stride.cpp * Output: \verbinclude Map_inner_stride.out * * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns. * Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is * a short version of \c OuterStride because the default template parameter of OuterStride * is \c Dynamic * \include Map_outer_stride.cpp * Output: \verbinclude Map_outer_stride.out * * For more details and for an example of specifying both an inner and an outer stride, see class Stride. * * \b Tip: to change the array of data mapped by a Map object, you can use the C++ * placement new syntax: * * Example: \include Map_placement_new.cpp * Output: \verbinclude Map_placement_new.out * * This class is the return type of PlainObjectBase::Map() but can also be used directly. * * \sa PlainObjectBase::Map(), \ref TopicStorageOrders */ namespace internal { template struct traits > : public traits { typedef traits TraitsBase; typedef typename PlainObjectType::Index Index; typedef typename PlainObjectType::Scalar Scalar; enum { InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? int(PlainObjectType::InnerStrideAtCompileTime) : int(StrideType::InnerStrideAtCompileTime), OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 ? int(PlainObjectType::OuterStrideAtCompileTime) : int(StrideType::OuterStrideAtCompileTime), HasNoInnerStride = InnerStrideAtCompileTime == 1, HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, HasNoStride = HasNoInnerStride && HasNoOuterStride, IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned), IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, KeepsPacketAccess = bool(HasNoInnerStride) && ( bool(IsDynamicSize) || HasNoOuterStride || ( OuterStrideAtCompileTime!=Dynamic && ((static_cast(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ), Flags0 = TraitsBase::Flags & (~NestByRefBit), Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit), Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime)) ? int(Flags1) : int(Flags1 & ~LinearAccessBit), Flags3 = is_lvalue::value ? int(Flags2) : (int(Flags2) & ~LvalueBit), Flags = KeepsPacketAccess ? 
int(Flags3) : (int(Flags3) & ~PacketAccessBit) }; private: enum { Options }; // Expressions don't have Options }; } template class Map : public MapBase > { public: typedef MapBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Map) typedef typename Base::PointerType PointerType; #if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API typedef const Scalar* PointerArgType; inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast(ptr); } #else typedef PointerType PointerArgType; inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } #endif inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; } inline Index outerStride() const { return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() : IsVectorAtCompileTime ? this->size() : int(Flags)&RowMajorBit ? this->cols() : this->rows(); } /** Constructor in the fixed-size case. * * \param dataPtr pointer to the array to map * \param a_stride optional Stride object, passing the strides. */ inline Map(PointerArgType dataPtr, const StrideType& a_stride = StrideType()) : Base(cast_to_pointer_type(dataPtr)), m_stride(a_stride) { PlainObjectType::Base::_check_template_params(); } /** Constructor in the dynamic-size vector case. * * \param dataPtr pointer to the array to map * \param a_size the size of the vector expression * \param a_stride optional Stride object, passing the strides. */ inline Map(PointerArgType dataPtr, Index a_size, const StrideType& a_stride = StrideType()) : Base(cast_to_pointer_type(dataPtr), a_size), m_stride(a_stride) { PlainObjectType::Base::_check_template_params(); } /** Constructor in the dynamic-size matrix case. * * \param dataPtr pointer to the array to map * \param nbRows the number of rows of the matrix expression * \param nbCols the number of columns of the matrix expression * \param a_stride optional Stride object, passing the strides. */ inline Map(PointerArgType dataPtr, Index nbRows, Index nbCols, const StrideType& a_stride = StrideType()) : Base(cast_to_pointer_type(dataPtr), nbRows, nbCols), m_stride(a_stride) { PlainObjectType::Base::_check_template_params(); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) protected: StrideType m_stride; }; template inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> ::Array(const Scalar *data) { this->_set_noalias(Eigen::Map(data)); } template inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> ::Matrix(const Scalar *data) { this->_set_noalias(Eigen::Map(data)); } } // end namespace Eigen #endif // EIGEN_MAP_H antimony/lib/fab/vendor/Eigen/src/Core/Dot.h0000644000175000017500000002232513341130426020105 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008, 2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DOT_H #define EIGEN_DOT_H namespace Eigen { namespace internal { // helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot // with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE // looking at the static assertions. Thus this is a trick to get better compile errors. 
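// For illustration (a hypothetical misuse, not from the original source): a call such as
//   Vector3f a; Vector2d b; a.dot(b);
// should be rejected by the static assertions in dot() (mismatched vector sizes and
// scalar types) rather than by a cryptic failure to instantiate the cwiseProduct expression.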
template struct dot_nocheck { typedef typename scalar_product_traits::Scalar,typename traits::Scalar>::ReturnType ResScalar; static inline ResScalar run(const MatrixBase& a, const MatrixBase& b) { return a.template binaryExpr::Scalar,typename traits::Scalar> >(b).sum(); } }; template struct dot_nocheck { typedef typename scalar_product_traits::Scalar,typename traits::Scalar>::ReturnType ResScalar; static inline ResScalar run(const MatrixBase& a, const MatrixBase& b) { return a.transpose().template binaryExpr::Scalar,typename traits::Scalar> >(b).sum(); } }; } // end namespace internal /** \returns the dot product of *this with other. * * \only_for_vectors * * \note If the scalar type is complex, then this function returns the Hermitian * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the * second variable. * * \sa squaredNorm(), norm() */ template template typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType MatrixBase::dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) typedef internal::scalar_conj_product_op func; EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); eigen_assert(size() == other.size()); return internal::dot_nocheck::run(*this, other); } #ifdef EIGEN2_SUPPORT /** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable * (conjugating the second variable). Of course this only makes a difference in the complex case. * * This method is only available in EIGEN2_SUPPORT mode. * * \only_for_vectors * * \sa dot() */ template template typename internal::traits::Scalar MatrixBase::eigen2_dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); return internal::dot_nocheck::run(other,*this); } #endif //---------- implementation of L2 norm and related functions ---------- /** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm. * In both cases, it is the sum of the squares of all the matrix entries. * For vectors, this also equals the dot product of \c *this with itself. * * \sa dot(), norm() */ template EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Real MatrixBase::squaredNorm() const { return numext::real((*this).cwiseAbs2().sum()); } /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm. * In both cases, it is the square root of the sum of the squares of all the matrix entries. * For vectors, this also equals the square root of the dot product of \c *this with itself. * * \sa dot(), squaredNorm() */ template inline typename NumTraits::Scalar>::Real MatrixBase::norm() const { using std::sqrt; return sqrt(squaredNorm()); } /** \returns an expression of the quotient of *this by its own norm.
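*
* A short sketch (illustrative):
* \code
* Vector3f v(0.f, 3.f, 4.f);
* Vector3f u = v.normalized();  // u is (0, 0.6, 0.8); v is left unchanged
* v.normalize();                // in-place variant: v itself now has unit norm
* \endcode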
* * \only_for_vectors * * \sa norm(), normalize() */ template inline const typename MatrixBase::PlainObject MatrixBase::normalized() const { typedef typename internal::nested::type Nested; typedef typename internal::remove_reference::type _Nested; _Nested n(derived()); return n / n.norm(); } /** Normalizes the vector, i.e. divides it by its own norm. * * \only_for_vectors * * \sa norm(), normalized() */ template inline void MatrixBase::normalize() { *this /= norm(); } //---------- implementation of other norms ---------- namespace internal { template struct lpNorm_selector { typedef typename NumTraits::Scalar>::Real RealScalar; static inline RealScalar run(const MatrixBase& m) { using std::pow; return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); } }; template struct lpNorm_selector { static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().sum(); } }; template struct lpNorm_selector { static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.norm(); } }; template struct lpNorm_selector { static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().maxCoeff(); } }; } // end namespace internal /** \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ * norm, that is the maximum of the absolute values of the coefficients of *this. * * \sa norm() */ template template inline typename NumTraits::Scalar>::Real MatrixBase::lpNorm() const { return internal::lpNorm_selector::run(*this); } //---------- implementation of isOrthogonal / isUnitary ---------- /** \returns true if *this is approximately orthogonal to \a other, * within the precision given by \a prec. * * Example: \include MatrixBase_isOrthogonal.cpp * Output: \verbinclude MatrixBase_isOrthogonal.out */ template template bool MatrixBase::isOrthogonal (const MatrixBase& other, const RealScalar& prec) const { typename internal::nested::type nested(derived()); typename internal::nested::type otherNested(other.derived()); return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); } /** \returns true if *this is approximately a unitary matrix, * within the precision given by \a prec. In the case where the \a Scalar * type is real, a unitary matrix is an orthogonal matrix, whence the name. * * \note This can be used to check whether a family of vectors forms an orthonormal basis. * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an * orthonormal basis. * * Example: \include MatrixBase_isUnitary.cpp * Output: \verbinclude MatrixBase_isUnitary.out */ template bool MatrixBase::isUnitary(const RealScalar& prec) const { typename Derived::Nested nested(derived()); for(Index i = 0; i < cols(); ++i) { if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) return false; for(Index j = 0; j < i; ++j) if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) return false; } return true; } } // end namespace Eigen #endif // EIGEN_DOT_H antimony/lib/fab/vendor/Eigen/src/Core/ProductBase.h0000644000175000017500000002607013341130426021573 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra.
// // Copyright (C) 2009-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PRODUCTBASE_H #define EIGEN_PRODUCTBASE_H namespace Eigen { /** \class ProductBase * \ingroup Core_Module * */ namespace internal { template struct traits > { typedef MatrixXpr XprKind; typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename promote_storage_type::StorageKind, typename traits::StorageKind>::ret StorageKind; typedef typename promote_index_type::Index, typename traits::Index>::type Index; enum { RowsAtCompileTime = traits::RowsAtCompileTime, ColsAtCompileTime = traits::ColsAtCompileTime, MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits::MaxColsAtCompileTime, Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0) | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit, // Note that EvalBeforeNestingBit and NestByRefBit // are not used in practice because nested is overloaded for products CoeffReadCost = 0 // FIXME why is it needed ? }; }; } #define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \ typedef ProductBase Base; \ EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ typedef typename Base::LhsNested LhsNested; \ typedef typename Base::_LhsNested _LhsNested; \ typedef typename Base::LhsBlasTraits LhsBlasTraits; \ typedef typename Base::ActualLhsType ActualLhsType; \ typedef typename Base::_ActualLhsType _ActualLhsType; \ typedef typename Base::RhsNested RhsNested; \ typedef typename Base::_RhsNested _RhsNested; \ typedef typename Base::RhsBlasTraits RhsBlasTraits; \ typedef typename Base::ActualRhsType ActualRhsType; \ typedef typename Base::_ActualRhsType _ActualRhsType; \ using Base::m_lhs; \ using Base::m_rhs; template class ProductBase : public MatrixBase { public: typedef MatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase) typedef typename Lhs::Nested LhsNested; typedef typename internal::remove_all::type _LhsNested; typedef internal::blas_traits<_LhsNested> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef typename internal::remove_all::type _ActualLhsType; typedef typename internal::traits::Scalar LhsScalar; typedef typename Rhs::Nested RhsNested; typedef typename internal::remove_all::type _RhsNested; typedef internal::blas_traits<_RhsNested> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all::type _ActualRhsType; typedef typename internal::traits::Scalar RhsScalar; // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once typedef CoeffBasedProduct FullyLazyCoeffBaseProductType; public: #ifndef EIGEN_NO_MALLOC typedef typename Base::PlainObject BasePlainObject; typedef Matrix DynPlainObject; typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)), BasePlainObject, DynPlainObject>::type PlainObject; #else typedef typename Base::PlainObject PlainObject; #endif ProductBase(const Lhs& a_lhs, const Rhs& a_rhs) : m_lhs(a_lhs), m_rhs(a_rhs) { eigen_assert(a_lhs.cols() == a_rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit 
functions"); } inline Index rows() const { return m_lhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } template inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); } template inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); } template inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); } const _LhsNested& lhs() const { return m_lhs; } const _RhsNested& rhs() const { return m_rhs; } // Implicit conversion to the nested type (trigger the evaluation of the product) operator const PlainObject& () const { m_result.resize(m_lhs.rows(), m_rhs.cols()); derived().evalTo(m_result); return m_result; } const Diagonal diagonal() const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } template const Diagonal diagonal() const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } const Diagonal diagonal(Index index) const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); } // restrict coeff accessors to 1x1 expressions. No need to care about mutators here since this isnt a Lvalue expression typename Base::CoeffReturnType coeff(Index row, Index col) const { #ifdef EIGEN2_SUPPORT return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum(); #else EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); Matrix result = *this; return result.coeff(row,col); #endif } typename Base::CoeffReturnType coeff(Index i) const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); Matrix result = *this; return result.coeff(i); } const Scalar& coeffRef(Index row, Index col) const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeffRef(row,col); } const Scalar& coeffRef(Index i) const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeffRef(i); } protected: LhsNested m_lhs; RhsNested m_rhs; mutable PlainObject m_result; }; // here we need to overload the nested rule for products // such that the nested type is a const reference to a plain matrix namespace internal { template struct nested, N, PlainObject> { typedef typename GeneralProduct::PlainObject const& type; }; template struct nested, N, PlainObject> { typedef typename GeneralProduct::PlainObject const& type; }; } template class ScaledProduct; // Note that these two operator* functions are not defined as member // functions of ProductBase, because, otherwise we would have to // define all overloads defined in MatrixBase. Furthermore, Using // "using Base::operator*" would not work with MSVC. 
// // Also note that here we accept any compatible scalar types template const ScaledProduct operator*(const ProductBase& prod, const typename Derived::Scalar& x) { return ScaledProduct(prod.derived(), x); } template typename internal::enable_if::value, const ScaledProduct >::type operator*(const ProductBase& prod, const typename Derived::RealScalar& x) { return ScaledProduct(prod.derived(), x); } template const ScaledProduct operator*(const typename Derived::Scalar& x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } template typename internal::enable_if::value, const ScaledProduct >::type operator*(const typename Derived::RealScalar& x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } namespace internal { template struct traits > : traits, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> > { typedef typename traits::StorageKind StorageKind; }; } template class ScaledProduct : public ProductBase, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> { public: typedef ProductBase, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> Base; typedef typename Base::Scalar Scalar; typedef typename Base::PlainObject PlainObject; // EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct) ScaledProduct(const NestedProduct& prod, const Scalar& x) : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {} template inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); } template inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); } template inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); } template inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); } const Scalar& alpha() const { return m_alpha; } protected: const NestedProduct& m_prod; Scalar m_alpha; }; /** \internal * Overloaded to perform an efficient C = (A*B).lazy() */ template template Derived& MatrixBase::lazyAssign(const ProductBase& other) { other.derived().evalTo(derived()); return derived(); } } // end namespace Eigen #endif // EIGEN_PRODUCTBASE_H antimony/lib/fab/vendor/Eigen/src/Core/DenseCoeffsBase.h0000644000175000017500000006576313341130426022353 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSECOEFFSBASE_H #define EIGEN_DENSECOEFFSBASE_H namespace Eigen { namespace internal { template struct add_const_on_value_type_if_arithmetic { typedef typename conditional::value, T, typename add_const_on_value_type::type>::type type; }; } /** \brief Base class providing read-only coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #ReadOnlyAccessors Constant indicating read-only access * * This class defines the \c operator() \c const function and friends, which can be used to read specific * entries of a matrix or array. 
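 *
 * A short sketch (illustrative; assumes an Eigen 3.2-era build):
 * \code
 * Eigen::Matrix2d m;
 * m << 1, 2,
 *      3, 4;
 * double x = m(0, 1);        // bounds-checked by eigen_assert in debug builds
 * double y = m.coeff(0, 1);  // unchecked: only when the indices are known valid
 * \endcode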
* * \sa DenseCoeffsBase, DenseCoeffsBase, * \ref TopicClassHierarchy */ template class DenseCoeffsBase : public EigenBase { public: typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; // Explanation for this CoeffReturnType typedef. // - This is the return type of the coeff() method. // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value). // - The is_artihmetic check is required since "const int", "const double", etc. will cause warnings on some systems // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is // not possible, since the underlying expressions might not offer a valid address the reference could be referring to. typedef typename internal::conditional::Flags&LvalueBit), const Scalar&, typename internal::conditional::value, Scalar, const Scalar>::type >::type CoeffReturnType; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType; typedef EigenBase Base; using Base::rows; using Base::cols; using Base::size; using Base::derived; EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const { return int(Derived::RowsAtCompileTime) == 1 ? 0 : int(Derived::ColsAtCompileTime) == 1 ? inner : int(Derived::Flags)&RowMajorBit ? outer : inner; } EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const { return int(Derived::ColsAtCompileTime) == 1 ? 0 : int(Derived::RowsAtCompileTime) == 1 ? inner : int(Derived::Flags)&RowMajorBit ? inner : outer; } /** Short version: don't use this function, use * \link operator()(Index,Index) const \endlink instead. * * Long version: this function is similar to * \link operator()(Index,Index) const \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator()(Index,Index) const \endlink. * * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const */ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeff(row, col); } EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const { return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); } /** \returns the coefficient at given the given row and column. * * \sa operator()(Index,Index), operator[](Index) */ EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const { eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeff(row, col); } /** Short version: don't use this function, use * \link operator[](Index) const \endlink instead. * * Long version: this function is similar to * \link operator[](Index) const \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. 
Only use this when it is guaranteed that the * parameter \a index is in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator[](Index) const \endlink. * * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const */ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { eigen_internal_assert(index >= 0 && index < size()); return derived().coeff(index); } /** \returns the coefficient at given index. * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, * z() const, w() const */ EIGEN_STRONG_INLINE CoeffReturnType operator[](Index index) const { #ifndef EIGEN2_SUPPORT EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) #endif eigen_assert(index >= 0 && index < size()); return derived().coeff(index); } /** \returns the coefficient at given index. * * This is synonymous to operator[](Index) const. * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, * z() const, w() const */ EIGEN_STRONG_INLINE CoeffReturnType operator()(Index index) const { eigen_assert(index >= 0 && index < size()); return derived().coeff(index); } /** equivalent to operator[](0). */ EIGEN_STRONG_INLINE CoeffReturnType x() const { return (*this)[0]; } /** equivalent to operator[](1). */ EIGEN_STRONG_INLINE CoeffReturnType y() const { return (*this)[1]; } /** equivalent to operator[](2). */ EIGEN_STRONG_INLINE CoeffReturnType z() const { return (*this)[2]; } /** equivalent to operator[](3). */ EIGEN_STRONG_INLINE CoeffReturnType w() const { return (*this)[3]; } /** \internal * \returns the packet of coefficients starting at the given row and column. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit. * * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets * starting at an address which is a multiple of the packet size. */ template EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().template packet(row,col); } /** \internal */ template EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const { return packet(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); } /** \internal * \returns the packet of coefficients starting at the given index. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit and the LinearAccessBit. * * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets * starting at an address which is a multiple of the packet size. 
*/ template EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { eigen_internal_assert(index >= 0 && index < size()); return derived().template packet(index); } protected: // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase. // But some methods are only available in the DirectAccess case. // So we add dummy methods here with these names, so that "using... " doesn't fail. // It's not private so that the child class DenseBase can access them, and it's not public // either since it's an implementation detail, so has to be protected. void coeffRef(); void coeffRefByOuterInner(); void writePacket(); void writePacketByOuterInner(); void copyCoeff(); void copyCoeffByOuterInner(); void copyPacket(); void copyPacketByOuterInner(); void stride(); void innerStride(); void outerStride(); void rowStride(); void colStride(); }; /** \brief Base class providing read/write coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #WriteAccessors Constant indicating read/write access * * This class defines the non-const \c operator() function and friends, which can be used to write specific * entries of a matrix or array. This class inherits DenseCoeffsBase which * defines the const variant for reading specific entries. * * \sa DenseCoeffsBase, \ref TopicClassHierarchy */ template class DenseCoeffsBase : public DenseCoeffsBase { public: typedef DenseCoeffsBase Base; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; using Base::coeff; using Base::rows; using Base::cols; using Base::size; using Base::derived; using Base::rowIndexByOuterInner; using Base::colIndexByOuterInner; using Base::operator[]; using Base::operator(); using Base::x; using Base::y; using Base::z; using Base::w; /** Short version: don't use this function, use * \link operator()(Index,Index) \endlink instead. * * Long version: this function is similar to * \link operator()(Index,Index) \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator()(Index,Index) \endlink. * * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) */ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeffRef(row, col); } EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner) { return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); } /** \returns a reference to the coefficient at given the given row and column. * * \sa operator[](Index) */ EIGEN_STRONG_INLINE Scalar& operator()(Index row, Index col) { eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeffRef(row, col); } /** Short version: don't use this function, use * \link operator[](Index) \endlink instead. * * Long version: this function is similar to * \link operator[](Index) \endlink, but without the assertion. 
* Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator[](Index) \endlink. * * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) */ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { eigen_internal_assert(index >= 0 && index < size()); return derived().coeffRef(index); } /** \returns a reference to the coefficient at given index. * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() */ EIGEN_STRONG_INLINE Scalar& operator[](Index index) { #ifndef EIGEN2_SUPPORT EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) #endif eigen_assert(index >= 0 && index < size()); return derived().coeffRef(index); } /** \returns a reference to the coefficient at given index. * * This is synonymous to operator[](Index). * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() */ EIGEN_STRONG_INLINE Scalar& operator()(Index index) { eigen_assert(index >= 0 && index < size()); return derived().coeffRef(index); } /** equivalent to operator[](0). */ EIGEN_STRONG_INLINE Scalar& x() { return (*this)[0]; } /** equivalent to operator[](1). */ EIGEN_STRONG_INLINE Scalar& y() { return (*this)[1]; } /** equivalent to operator[](2). */ EIGEN_STRONG_INLINE Scalar& z() { return (*this)[2]; } /** equivalent to operator[](3). */ EIGEN_STRONG_INLINE Scalar& w() { return (*this)[3]; } /** \internal * Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit. * * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets * starting at an address which is a multiple of the packet size. */ template EIGEN_STRONG_INLINE void writePacket (Index row, Index col, const typename internal::packet_traits::type& val) { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); derived().template writePacket(row,col,val); } /** \internal */ template EIGEN_STRONG_INLINE void writePacketByOuterInner (Index outer, Index inner, const typename internal::packet_traits::type& val) { writePacket(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner), val); } /** \internal * Stores the given packet of coefficients, at the given index in this expression. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit and the LinearAccessBit. * * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets * starting at an address which is a multiple of the packet size. 
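 *
 * For the public write accessors defined above (operator(), operator[],
 * coeffRef(), x()/y()/z()/w()), a short sketch (illustrative; assumes an
 * Eigen 3.2-era build):
 * \code
 * Eigen::Vector4f v = Eigen::Vector4f::Zero();
 * v.x() = 1.0f;           // same as v[0] = 1.0f
 * v(1) = 2.0f;            // bounds-checked in debug builds
 * v.coeffRef(3) = 3.0f;   // unchecked write: index must be in range
 * \endcode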
*/ template EIGEN_STRONG_INLINE void writePacket (Index index, const typename internal::packet_traits::type& val) { eigen_internal_assert(index >= 0 && index < size()); derived().template writePacket(index,val); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Copies the coefficient at position (row,col) of other into *this. * * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code * with usual assignments. * * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. */ template EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase& other) { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); derived().coeffRef(row, col) = other.derived().coeff(row, col); } /** \internal Copies the coefficient at the given index of other into *this. * * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code * with usual assignments. * * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. */ template EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase& other) { eigen_internal_assert(index >= 0 && index < size()); derived().coeffRef(index) = other.derived().coeff(index); } template EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase& other) { const Index row = rowIndexByOuterInner(outer,inner); const Index col = colIndexByOuterInner(outer,inner); // derived() is important here: copyCoeff() may be reimplemented in Derived! derived().copyCoeff(row, col, other); } /** \internal Copies the packet at position (row,col) of other into *this. * * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code * with usual assignments. * * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. */ template EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase& other) { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); derived().template writePacket(row, col, other.derived().template packet(row, col)); } /** \internal Copies the packet at the given index of other into *this. * * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code * with usual assignments. * * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. */ template EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase& other) { eigen_internal_assert(index >= 0 && index < size()); derived().template writePacket(index, other.derived().template packet(index)); } /** \internal */ template EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase& other) { const Index row = rowIndexByOuterInner(outer,inner); const Index col = colIndexByOuterInner(outer,inner); // derived() is important here: copyCoeff() may be reimplemented in Derived! derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other); } #endif }; /** \brief Base class providing direct read-only coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #DirectAccessors Constant indicating direct access * * This class defines functions to work with strides which can be used to access entries directly. 
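 *
 * For instance (a sketch, assuming the default column-major storage and an
 * Eigen 3.2-era build; real code would also need \c <cassert>):
 * \code
 * Eigen::MatrixXd m(4, 5);
 * assert(m.innerStride() == 1);  // consecutive coeffs within one column
 * assert(m.outerStride() == 4);  // pointer jump between two columns
 * assert(m.rowStride()   == 1);  // column-major: rows are the inner direction
 * assert(m.colStride()   == 4);
 * \endcode
 *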
This class * inherits DenseCoeffsBase which defines functions to access entries read-only using * \c operator() . * * \sa \ref TopicClassHierarchy */ template class DenseCoeffsBase : public DenseCoeffsBase { public: typedef DenseCoeffsBase Base; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; using Base::rows; using Base::cols; using Base::size; using Base::derived; /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. * * \sa outerStride(), rowStride(), colStride() */ inline Index innerStride() const { return derived().innerStride(); } /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns * in a column-major matrix). * * \sa innerStride(), rowStride(), colStride() */ inline Index outerStride() const { return derived().outerStride(); } // FIXME shall we remove it ? inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); } /** \returns the pointer increment between two consecutive rows. * * \sa innerStride(), outerStride(), colStride() */ inline Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); } /** \returns the pointer increment between two consecutive columns. * * \sa innerStride(), outerStride(), rowStride() */ inline Index colStride() const { return Derived::IsRowMajor ? innerStride() : outerStride(); } }; /** \brief Base class providing direct read/write coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #DirectWriteAccessors Constant indicating direct access * * This class defines functions to work with strides which can be used to access entries directly. This class * inherits DenseCoeffsBase which defines functions to access entries read/write using * \c operator(). * * \sa \ref TopicClassHierarchy */ template class DenseCoeffsBase : public DenseCoeffsBase { public: typedef DenseCoeffsBase Base; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; using Base::rows; using Base::cols; using Base::size; using Base::derived; /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. * * \sa outerStride(), rowStride(), colStride() */ inline Index innerStride() const { return derived().innerStride(); } /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns * in a column-major matrix). * * \sa innerStride(), rowStride(), colStride() */ inline Index outerStride() const { return derived().outerStride(); } // FIXME shall we remove it ? inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); } /** \returns the pointer increment between two consecutive rows. * * \sa innerStride(), outerStride(), colStride() */ inline Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); } /** \returns the pointer increment between two consecutive columns. * * \sa innerStride(), outerStride(), rowStride() */ inline Index colStride() const { return Derived::IsRowMajor ? 
innerStride() : outerStride(); } }; namespace internal { template struct first_aligned_impl { static inline typename Derived::Index run(const Derived&) { return 0; } }; template struct first_aligned_impl { static inline typename Derived::Index run(const Derived& m) { return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); } }; /** \internal \returns the index of the first element of the array that is well aligned for vectorization. * * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more * documentation. */ template static inline typename Derived::Index first_aligned(const Derived& m) { return first_aligned_impl ::run(m); } template::ret> struct inner_stride_at_compile_time { enum { ret = traits::InnerStrideAtCompileTime }; }; template struct inner_stride_at_compile_time { enum { ret = 0 }; }; template::ret> struct outer_stride_at_compile_time { enum { ret = traits::OuterStrideAtCompileTime }; }; template struct outer_stride_at_compile_time { enum { ret = 0 }; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_DENSECOEFFSBASE_H antimony/lib/fab/vendor/Eigen/src/Core/ArrayBase.h0000644000175000017500000002047013341130426021227 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAYBASE_H #define EIGEN_ARRAYBASE_H namespace Eigen { template class MatrixWrapper; /** \class ArrayBase * \ingroup Core_Module * * \brief Base class for all 1D and 2D array, and related expressions * * An array is similar to a dense vector or matrix. While matrices are mathematical * objects with well defined linear algebra operators, an array is just a collection * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence, * all operations applied to an array are performed coefficient wise. Furthermore, * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient * constructors allowing to easily write generic code working for both scalar values * and arrays. * * This class is the base that is inherited by all array expression types. * * \tparam Derived is the derived type, e.g., an array or an expression type. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. * * \sa class MatrixBase, \ref TopicClassHierarchy */ template class ArrayBase : public DenseBase { public: #ifndef EIGEN_PARSED_BY_DOXYGEN /** The base class for a given storage type. 
*/ typedef ArrayBase StorageBaseType; typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; using internal::special_scalar_op_base::Scalar, typename NumTraits::Scalar>::Real>::operator*; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::CoeffReadCost; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::operator=; using Base::operator+=; using Base::operator-=; using Base::operator*=; using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal the plain matrix type corresponding to this expression. Note that is not necessarily * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const * reference to a matrix, not a matrix! It is however guaranteed that the return type of eval() is either * PlainObject or const PlainObject&. */ typedef Array::Scalar, internal::traits::RowsAtCompileTime, internal::traits::ColsAtCompileTime, AutoAlign | (internal::traits::Flags&RowMajorBit ? RowMajor : ColMajor), internal::traits::MaxRowsAtCompileTime, internal::traits::MaxColsAtCompileTime > PlainObject; /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp,Derived> ConstantReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/ArrayCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # include "../plugins/ArrayCwiseBinaryOps.h" # ifdef EIGEN_ARRAYBASE_PLUGIN # include EIGEN_ARRAYBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ Derived& operator=(const ArrayBase& other) { return internal::assign_selector::run(derived(), other.derived()); } Derived& operator+=(const Scalar& scalar) { return *this = derived() + scalar; } Derived& operator-=(const Scalar& scalar) { return *this = derived() - scalar; } template Derived& operator+=(const ArrayBase& other); template Derived& operator-=(const ArrayBase& other); template Derived& operator*=(const ArrayBase& other); template Derived& operator/=(const ArrayBase& other); public: ArrayBase& array() { return *this; } const ArrayBase& array() const { return *this; } /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array * \sa MatrixBase::array() */ MatrixWrapper matrix() { return derived(); } const MatrixWrapper matrix() const { return derived(); } // template // inline void evalTo(Dest& dst) const { dst = matrix(); } protected: ArrayBase() : Base() {} private: explicit ArrayBase(Index); ArrayBase(Index,Index); template explicit ArrayBase(const 
ArrayBase&); protected: // mixing arrays and matrices is not legal template Derived& operator+=(const MatrixBase& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template Derived& operator-=(const MatrixBase& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /** replaces \c *this by \c *this - \a other. * * \returns a reference to \c *this */ template template EIGEN_STRONG_INLINE Derived & ArrayBase::operator-=(const ArrayBase &other) { SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } /** replaces \c *this by \c *this + \a other. * * \returns a reference to \c *this */ template template EIGEN_STRONG_INLINE Derived & ArrayBase::operator+=(const ArrayBase& other) { SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } /** replaces \c *this by \c *this * \a other coefficient wise. * * \returns a reference to \c *this */ template template EIGEN_STRONG_INLINE Derived & ArrayBase::operator*=(const ArrayBase& other) { SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } /** replaces \c *this by \c *this / \a other coefficient wise. * * \returns a reference to \c *this */ template template EIGEN_STRONG_INLINE Derived & ArrayBase::operator/=(const ArrayBase& other) { SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } } // end namespace Eigen #endif // EIGEN_ARRAYBASE_H antimony/lib/fab/vendor/Eigen/src/Core/BooleanRedux.h0000644000175000017500000000752413341130426021752 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
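#if 0
// Usage sketch for the boolean reductions defined below in this header
// (all, any, count, hasNaN, allFinite). Illustrative only, not part of the
// original file; assumes an Eigen 3.2-era installation.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v(1.0, -2.0, 3.0);
  std::cout << (v.array() > 0).all()   << "\n";  // 0: one coefficient is negative
  std::cout << (v.array() > 0).any()   << "\n";  // 1
  std::cout << (v.array() > 0).count() << "\n";  // 2
  std::cout << v.hasNaN()              << "\n";  // 0
  std::cout << v.allFinite()           << "\n";  // 1
  return 0;
}
#endif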
#ifndef EIGEN_ALLANDANY_H #define EIGEN_ALLANDANY_H namespace Eigen { namespace internal { template struct all_unroller { enum { col = (UnrollCount-1) / Derived::RowsAtCompileTime, row = (UnrollCount-1) % Derived::RowsAtCompileTime }; static inline bool run(const Derived &mat) { return all_unroller::run(mat) && mat.coeff(row, col); } }; template struct all_unroller { static inline bool run(const Derived &/*mat*/) { return true; } }; template struct all_unroller { static inline bool run(const Derived &) { return false; } }; template struct any_unroller { enum { col = (UnrollCount-1) / Derived::RowsAtCompileTime, row = (UnrollCount-1) % Derived::RowsAtCompileTime }; static inline bool run(const Derived &mat) { return any_unroller::run(mat) || mat.coeff(row, col); } }; template struct any_unroller { static inline bool run(const Derived & /*mat*/) { return false; } }; template struct any_unroller { static inline bool run(const Derived &) { return false; } }; } // end namespace internal /** \returns true if all coefficients are true * * Example: \include MatrixBase_all.cpp * Output: \verbinclude MatrixBase_all.out * * \sa any(), Cwise::operator<() */ template inline bool DenseBase::all() const { enum { unroll = SizeAtCompileTime != Dynamic && CoeffReadCost != Dynamic && NumTraits::AddCost != Dynamic && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT }; if(unroll) return internal::all_unroller::run(derived()); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (!coeff(i, j)) return false; return true; } } /** \returns true if at least one coefficient is true * * \sa all() */ template inline bool DenseBase::any() const { enum { unroll = SizeAtCompileTime != Dynamic && CoeffReadCost != Dynamic && NumTraits::AddCost != Dynamic && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT }; if(unroll) return internal::any_unroller::run(derived()); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (coeff(i, j)) return true; return false; } } /** \returns the number of coefficients which evaluate to true * * \sa all(), any() */ template inline typename DenseBase::Index DenseBase::count() const { return derived().template cast().template cast().sum(); } /** \returns true is \c *this contains at least one Not A Number (NaN). * * \sa allFinite() */ template inline bool DenseBase::hasNaN() const { return !((derived().array()==derived().array()).all()); } /** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values. * * \sa hasNaN() */ template inline bool DenseBase::allFinite() const { return !((derived()-derived()).hasNaN()); } } // end namespace Eigen #endif // EIGEN_ALLANDANY_H antimony/lib/fab/vendor/Eigen/src/Core/IO.h0000644000175000017500000001651113341130426017666 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob // Copyright (C) 2008 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
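#if 0
// Usage sketch for the IOFormat / WithFormat facilities defined below.
// Illustrative only, not part of the original file; assumes an Eigen
// 3.2-era installation.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 1.111, 2,
       3,     4;
  // Constructor arguments: precision, flags, coeffSeparator, rowSeparator,
  // rowPrefix, rowSuffix, matPrefix, matSuffix. Here: 3 significant digits,
  // no column alignment, Octave-like delimiters.
  Eigen::IOFormat fmt(3, Eigen::DontAlignCols, ", ", ";\n", "", "", "[", "]");
  std::cout << m.format(fmt) << "\n";
  // prints:
  // [1.11, 2;
  //  3, 4]
  return 0;
}
#endif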
#ifndef EIGEN_IO_H #define EIGEN_IO_H namespace Eigen { enum { DontAlignCols = 1 }; enum { StreamPrecision = -1, FullPrecision = -2 }; namespace internal { template std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt); } /** \class IOFormat * \ingroup Core_Module * * \brief Stores a set of parameters controlling the way matrices are printed * * List of available parameters: * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision. * The default is the special value \c StreamPrecision which means to use the * stream's own precision setting, as set for instance using \c cout.precision(3). The other special value * \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point * type. * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c DontAlignCols which * allows to disable the alignment of columns, resulting in faster code. * - \b coeffSeparator string printed between two coefficients of the same row * - \b rowSeparator string printed between two rows * - \b rowPrefix string printed at the beginning of each row * - \b rowSuffix string printed at the end of each row * - \b matPrefix string printed at the beginning of the matrix * - \b matSuffix string printed at the end of the matrix * * Example: \include IOFormat.cpp * Output: \verbinclude IOFormat.out * * \sa DenseBase::format(), class WithFormat */ struct IOFormat { /** Default contructor, see class IOFormat for the meaning of the parameters */ IOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _coeffSeparator = " ", const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="", const std::string& _matPrefix="", const std::string& _matSuffix="") : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator), rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags) { int i = int(matSuffix.length())-1; while (i>=0 && matSuffix[i]!='\n') { rowSpacer += ' '; i--; } } std::string matPrefix, matSuffix; std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer; std::string coeffSeparator; int precision; int flags; }; /** \class WithFormat * \ingroup Core_Module * * \brief Pseudo expression providing matrix output with given format * * \param ExpressionType the type of the object on which IO stream operations are performed * * This class represents an expression with stream operators controlled by a given IOFormat. * It is the return type of DenseBase::format() * and most of the time this is the only way it is used. * * See class IOFormat for some examples. * * \sa DenseBase::format(), class IOFormat */ template class WithFormat { public: WithFormat(const ExpressionType& matrix, const IOFormat& format) : m_matrix(matrix), m_format(format) {} friend std::ostream & operator << (std::ostream & s, const WithFormat& wf) { return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format); } protected: const typename ExpressionType::Nested m_matrix; IOFormat m_format; }; /** \returns a WithFormat proxy object allowing to print a matrix the with given * format \a fmt. * * See class IOFormat for some examples. 
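 *
 * For instance (a sketch; \c mat and \c CleanFmt are illustrative names):
 * \code
 * Eigen::IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]");
 * std::cout << mat.format(CleanFmt) << std::endl;
 * \endcode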
* * \sa class IOFormat, class WithFormat */ template inline const WithFormat DenseBase::format(const IOFormat& fmt) const { return WithFormat(derived(), fmt); } namespace internal { template struct significant_decimals_default_impl { typedef typename NumTraits::Real RealScalar; static inline int run() { using std::ceil; using std::log; return cast(ceil(-log(NumTraits::epsilon())/log(RealScalar(10)))); } }; template struct significant_decimals_default_impl { static inline int run() { return 0; } }; template struct significant_decimals_impl : significant_decimals_default_impl::IsInteger> {}; /** \internal * print the matrix \a _m to the output stream \a s using the output format \a fmt */ template std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) { if(_m.size() == 0) { s << fmt.matPrefix << fmt.matSuffix; return s; } typename Derived::Nested m = _m; typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; Index width = 0; std::streamsize explicit_precision; if(fmt.precision == StreamPrecision) { explicit_precision = 0; } else if(fmt.precision == FullPrecision) { if (NumTraits::IsInteger) { explicit_precision = 0; } else { explicit_precision = significant_decimals_impl::run(); } } else { explicit_precision = fmt.precision; } std::streamsize old_precision = 0; if(explicit_precision) old_precision = s.precision(explicit_precision); bool align_cols = !(fmt.flags & DontAlignCols); if(align_cols) { // compute the largest width for(Index j = 0; j < m.cols(); ++j) for(Index i = 0; i < m.rows(); ++i) { std::stringstream sstr; sstr.copyfmt(s); sstr << m.coeff(i,j); width = std::max(width, Index(sstr.str().length())); } } s << fmt.matPrefix; for(Index i = 0; i < m.rows(); ++i) { if (i) s << fmt.rowSpacer; s << fmt.rowPrefix; if(width) s.width(width); s << m.coeff(i, 0); for(Index j = 1; j < m.cols(); ++j) { s << fmt.coeffSeparator; if (width) s.width(width); s << m.coeff(i, j); } s << fmt.rowSuffix; if( i < m.rows() - 1) s << fmt.rowSeparator; } s << fmt.matSuffix; if(explicit_precision) s.precision(old_precision); return s; } } // end namespace internal /** \relates DenseBase * * Outputs the matrix, to the given stream. * * If you wish to print the matrix with a format different than the default, use DenseBase::format(). * * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers. * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters. * * \sa DenseBase::format() */ template std::ostream & operator << (std::ostream & s, const DenseBase & m) { return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); } } // end namespace Eigen #endif // EIGEN_IO_H antimony/lib/fab/vendor/Eigen/src/Core/Select.h0000644000175000017500000001372113341130426020576 0ustar tiagotiago// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
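#if 0
// Usage sketch for the coefficient-wise select() defined below (a
// coefficient-wise ternary ?: operator). Illustrative only, not part of the
// original file; assumes an Eigen 3.2-era installation.
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  // Where the boolean condition holds, take the first argument (here the
  // scalar 0.0), elsewhere keep the original entry: clamps negatives to 0.
  Eigen::MatrixXd clamped = (m.array() < 0.0).select(0.0, m);
  return 0;
}
#endif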
#ifndef EIGEN_SELECT_H #define EIGEN_SELECT_H namespace Eigen { /** \class Select * \ingroup Core_Module * * \brief Expression of a coefficient wise version of the C++ ternary operator ?: * * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix * \param ThenMatrixType the type of the \em then expression * \param ElseMatrixType the type of the \em else expression * * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. * It is the return type of DenseBase::select() and most of the time this is the only way it is used. * * \sa DenseBase::select(const DenseBase&, const DenseBase&) const */ namespace internal { template struct traits > : traits { typedef typename traits::Scalar Scalar; typedef Dense StorageKind; typedef typename traits::XprKind XprKind; typedef typename ConditionMatrixType::Nested ConditionMatrixNested; typedef typename ThenMatrixType::Nested ThenMatrixNested; typedef typename ElseMatrixType::Nested ElseMatrixNested; enum { RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits, CoeffReadCost = traits::type>::CoeffReadCost + EIGEN_SIZE_MAX(traits::type>::CoeffReadCost, traits::type>::CoeffReadCost) }; }; } template class Select : internal::no_assignment_operator, public internal::dense_xpr_base< Select >::type { public: typedef typename internal::dense_xpr_base