kimwitu-doc-10a+1/0000755000114400011300000000000010620567141013676 5ustar piefelsimulantkimwitu-doc-10a+1/a4wide.sty0000644000114400011300000000257707076562205015636 0ustar piefelsimulant% % "moretext" document style option. % Jean-Francois Lamy, July 86 % % Redefines the margins so that they are more in line with % what we are used to see. % \input a4.sty \ifcase \@ptsize % mods for 10 pt \oddsidemargin 0.15 in % Left margin on odd-numbered pages. \evensidemargin 0.35 in % Left margin on even-numbered pages. \marginparwidth 1 in % Width of marginal notes. \oddsidemargin 0.25 in % Note that \oddsidemargin = \evensidemargin \evensidemargin 0.25 in \marginparwidth 0.75 in \textwidth 5.875 in % Width of text line. \or % mods for 11 pt \oddsidemargin 0.1 in % Left margin on odd-numbered pages. \evensidemargin 0.15 in % Left margin on even-numbered pages. \marginparwidth 1 in % Width of marginal notes. \oddsidemargin 0.125 in % Note that \oddsidemargin = \evensidemargin \evensidemargin 0.125 in \marginparwidth 0.75 in \textwidth 6.125 in % Width of text line. \or % mods for 12 pt \oddsidemargin -10 pt % Left margin on odd-numbered pages. \evensidemargin 10 pt % Left margin on even-numbered pages. \marginparwidth 1 in % Width of marginal notes. \oddsidemargin 0 in % Note that \oddsidemargin = \evensidemargin \evensidemargin 0 in \marginparwidth 0.75 in \textwidth 6.375 true in % Width of text line. \fi kimwitu-doc-10a+1/lgrind.sty0000644000114400011300000001335407076562206015734 0ustar piefelsimulant%% %% This is file `lgrind.sty' generated %% on <1991/9/13> with the docstrip utility (v1.1l test). %% %% The original source file was `lgrind.doc'. %% %% %% lgrind is a minor adaptation of Jerry Leichter's tgrind for LaTeX, %% which was a notable improvement upon Van Jacobsen's tgrind for %% plain TeX, which was adapted from vgrind, a troff prettyprinter. 
%% %% LaTeX lgrind environment \newif\ifc@mment %True when setting a comment \newif\ifstr@ng %True when setting a string constant \newif\ifright@ %In comments, \" => '' (else ``) \newif\ifLGd@fault %True after default \LGbegin \newcount\lc@unt %Line counter \newcount\ln@xt %Next line to get numbered \newbox\ls@far %Stores lines so far for tabbing \newdimen\TBw@d %Tabwidth when tabbing \newbox\tb@x %Tab positioning \newdimen\@ts %Width of listing space {\catcode`\_=\active \gdef\@setunder{\let_=\sp@ce}} \def\BGfont{\sf} %"Background" font \def\CMfont{\rm} %Comment font \def\KWfont{\bf} %Keyword font \def\STfont{\tt} %String font \def\VRfont{\it} %Variable name font \def\LGsize{\small} %Size to use in displayed code \def\LGfsize{\footnotesize} %Size to use in \lgrindfile \newif\ifLGinline %True for in-line code \newif\ifLGleftnum %Put line numbers on the left \newcount\LGnuminterval %Line numbering interval \LGnuminterval=10 \newskip\LGindent %Indentation for displayed lines \LGindent=1.6667\parindent \def\LGbegin{\ifLGinline$\hbox\else$$\vbox\fi\bgroup\LGd@faulttrue} \def\LGend{\ifLGd@fault\egroup\ifLGinline$\else$$\fi\LGd@faultfalse\fi} \def\lgrind{% \def\Line##1{\L{\LB{##1}}}% %For use with %= \let\Head=\@gobble %Header information (-h) \def\File##1,##2,##3{}% %File specification info \let\Proc=\@gobble %Marks procedure def beginning \let\ProcCont=\@gobble %Marks end of nested procedure def \def\NewPage{\filbreak\bigskip}% \ifLGinline \def\L##1{\setbox\ls@far\null\CF\strut##1\ignorespaces}% \else \let\r@ghtlno\relax\let\l@ftlno\relax \ifnum\LGnuminterval>\z@ \ifLGleftnum \def\l@ftlno{\ifnum\lc@unt>\ln@xt\global\advance\ln@xt by\LGnuminterval \llap{{\scriptsize\the\lc@unt\quad}}\fi}% \else \def\r@ghtlno{\ifnum\lc@unt>\ln@xt\global\advance\ln@xt by\LGnuminterval \rlap{{\scriptsize\enspace\the\lc@unt}}\fi}% \fi \fi \def\L##1{\@@par\setbox\ls@far=\null\CF\strut \global\advance\lc@unt by1% \hbox to\hsize{\hskip\LGindent\l@ftlno ##1\hfil\r@ghtlno}\ignorespaces}% \fi \lc@unt=0\ln@xt=\LGnuminterval\advance\ln@xt by-1% \def\LB{\CF\hbox\bgroup\box\ls@far\let\next=}% \def\Tab##1{\setbox\tb@x=\lastbox\TBw@d=\wd\tb@x\advance\TBw@d by 1\@ts \ifdim\TBw@d>##1\@ts \setbox\ls@far=\hbox{\box\ls@far \box\tb@x \sp@ce}\else \setbox\ls@far=\hbox to ##1\@ts{\box\ls@far \box\tb@x \hfil}\fi\LB}% \ifLGinline\def\sp@ce{\hskip .3333em}% \else \setbox\tb@x=\hbox{{\tt 0}}\@ts=\wd\tb@x \def\sp@ce{\hskip 1\@ts}\fi \catcode`\_=\active \@setunder \let\CF=\BGfont \def\K##1{{\KWfont ##1}\global\futurelet\next\ic@r}% %Keyword \def\V##1{{\VRfont ##1}\global\futurelet\next\ic@r}% %Variable \def\ic@r{\let\@tempa\/\ifx.\next\let\@tempa\relax% %Optional \/ \else\ifx,\next\let\@tempa\relax\fi\fi\@tempa}% \def\C{\CMfont \global\let\CF\CMfont \global\c@mmenttrue \global\right@false}% \def\CE{\BGfont \global\let\CF\BGfont \global\c@mmentfalse}% \def\S{\STfont \global\let\CF\STfont \global\str@ngtrue}% \def\SE{\BGfont \global\let\CF\BGfont \global\str@ngfalse}% \def\,{\relax \ifmmode\mskip\thinmuskip \else\thinspace \fi}% \def\!{\relax \ifmmode\mskip-\thinmuskip \else\negthinspace \fi}% \def\CH##1##2##3{\relax\ifmmode ##1\relax \else\ifstr@ng ##2\relax\else$##3$\fi\fi }% \def\{{\CH\lbrace {\char'173}\lbrace }% \def\}{\CH\rbrace {\char'175}\rbrace }% \def\1{\CH///}% %/ \def\2{\CH\backslash {\char'134}\backslash }% %\ \def\|{\CH|{\char'174}|}% \def\<{\CH<<<}% \def\>{\CH>>>}% \def\*{\CH***}\relax %\relax for DOCSTY \def\-{\CH---}% \def\_{\ifstr@ng {\char'137}\else \leavevmode \kern.06em \vbox{\hrule width.35em}% 
\ifdim\fontdimen\@ne\font=\z@ \kern.06em \fi\fi }% \def\&{{\sf \char'046}}% \def\#{{\STfont \char'043}}% \def\%{{\char'045}}% \def\~{{\char'176}}% \def\"{\ifc@mment\ifright@ ''\global\right@false \else``\global\right@true \fi \else{\tt \char'042}\fi}% \def\'{\ifc@mment'\else {\tt \char'015}\fi}% \def\^{{\tt \char'136}}% \def\${{\ifmmode\sl\else\ifdim\fontdimen\@ne\font>\z@\sl\fi\fi \char'044}}% %No $ in \it, use \sl \parindent\z@\parskip\z@ plus 1pt\hsize\linewidth% } \def\endlgrind{\@@par} \def\lgrinde{\ifLGinline\else\LGsize\fi\begin{lgrind}} \def\endlgrinde{\end{lgrind}} \def\lagrind{\@ifstar{\@slagrind}{\@lagrind}} \def\@lagrind{\@ifnextchar[{\@@lagrind}{\@@lagrind[t]}} \def\@slagrind{\@ifnextchar[{\@@slagrind}{\@@slagrind[t]}} \def\@@lagrind[#1]#2#3#4{% \begin{figure}[#1] \hrule \vskip .5\baselineskip \begin{minipage}\columnwidth\LGsize\LGindent\z@ \begin{lgrind} \input #2\relax \end{lgrind} \end{minipage} \vskip .5\baselineskip plus .5\baselineskip \begingroup \setbox\z@=\hbox{#4}% \ifdim\wd\z@>\z@ \caption{#3}% \label{#4}% \else \captcont{#3}% \fi \endgroup \vskip 2pt \hrule \end{figure} } \def\@@slagrind[#1]#2#3#4{% \begin{figure*}[#1] \hrule \vskip .5\baselineskip \begin{minipage}\textwidth\LGsize\LGindent\z@ \begin{lgrind} \input #2\relax \end{lgrind} \end{minipage} \vskip .5\baselineskip plus .5\baselineskip \begingroup \setbox\z@=\hbox{#4}% \ifdim\wd\z@>\z@ \caption{#3}% \label{#4}% \else \captcont{#3}% \fi \endgroup \vskip 2pt \hrule \end{figure*} } \def\lgrindfile#1{% \par\addvspace{0.1in} \hrule \nopagebreak[2] \vskip .5\baselineskip \nopagebreak[2] \begingroup\LGfsize\LGindent\z@ \begin{lgrind} \input #1\relax \end{lgrind} \endgroup \nopagebreak[2] \vskip .5\baselineskip \nopagebreak[2] \hrule \addvspace{0.1in} } \endinput %% %% End of file `lgrind.sty'. kimwitu-doc-10a+1/phylum_example.k0000444000114400011300000000012607076562206017110 0ustar piefelsimulantexpr: Plus(expr expr) | Minus(expr expr) | Neg(expr) | Zero() ; exprlist: list expr; kimwitu-doc-10a+1/list_phylum_example.k0000444000114400011300000000007107076562206020142 0ustar piefelsimulantexprlist: Nilexprlist() | Consexprlist(expr exprlist) ; kimwitu-doc-10a+1/attributed_phylum_example.k0000444000114400011300000000012407076562207021336 0ustar piefelsimulantexpr: Plus(expr expr) | Minus(expr expr) | Neg(expr) | Zero() { float value = 0;} ; kimwitu-doc-10a+1/alternative_attribute_initialisation.k0000444000114400011300000000004107076562207023555 0ustar piefelsimulant{ float value; { $0->value=0;}} kimwitu-doc-10a+1/uniq_phylum_example.k0000444000114400011300000000006407076562207020146 0ustar piefelsimulantID {uniq}: Str(casestring) { short type = UNDEF;}; kimwitu-doc-10a+1/symbol_table_example.k0000444000114400011300000000040607076562207020250 0ustar piefelsimulant/* defining occurrence */ id = Str(mkcasestring("foo")); if (id->type != UNDEF) error("doubly defined"); id->type = USED; /* set other attributes here as well */ /* applied occurrence */ id = Str(mkcasestring("foo")); if (id->type == UNDEF) error("undefined"); kimwitu-doc-10a+1/non_uniq_declaration_example.k0000444000114400011300000000001607076562210021756 0ustar piefelsimulantID {! uniq}:; kimwitu-doc-10a+1/storage_classes_example.k0000444000114400011300000000027207076562210020750 0ustar piefelsimulant%storageclass intermediate wholeprogram; /* the `%' is part of the keyword */ ID2 { intermediate }: Operator1( ... ) | Operator2( ... ) | ... ; ID3 { wholeprogram }: Operator3( ... 
); kimwitu-doc-10a+1/redefine_LARGEPRIME_example.k0000444000114400011300000000005307076562211021055 0ustar piefelsimulant%{ KC_TYPES_HEADER #define LARGEPRIME 0 %} kimwitu-doc-10a+1/function_definition_example_newest.k0000440000114400011300000000017107076562211023204 0ustar piefelsimulantint len(exprlist el) { with(el) { Nilexprlist: { return 0; } tt = Consexprlist(*, t): { return len(t) + 1; } } } kimwitu-doc-10a+1/foreach_statement_example.k0000444000114400011300000000017107076562212021262 0ustar piefelsimulantint len(exprlist el) { int length = 0; foreach( l; exprlist el ) { length++; } return length; } kimwitu-doc-10a+1/rewrite_rule_example.k0000444000114400011300000000003407076562212020275 0ustar piefelsimulantNeg(x) -> Minus(Zero(), x); kimwitu-doc-10a+1/foreach_with_statement_example.k0000444000114400011300000000035207076562212022316 0ustar piefelsimulantexpr sum(exprlist el) { expr sub_total = Zero(); foreach( $e; exprlist el ) { Add( x ): { sub_total = Plus( sub_total, x ); } Subtract( x ): { sub_total = Minus( sub_total, x ); } } return sub_total; } kimwitu-doc-10a+1/foreach_pattern_statement_example.k0000444000114400011300000000024507076562212023021 0ustar piefelsimulantexpr add_Adds(exprlist el) { expr all_Adds = Zero(); foreach( Add( x ); exprlist el ) { all_Adds = Plus( all_Adds, x ); } return all_Adds; } kimwitu-doc-10a+1/rewrite_rule_function_example.k0000444000114400011300000000003407076562213022203 0ustar piefelsimulantOperator(x) -> Function(x); kimwitu-doc-10a+1/unparse_definition_example.k0000444000114400011300000000036207076562213021457 0ustar piefelsimulantPlus(e1, e2) -> [ : e1 "+" e2 ]; Minus(e1, e2) -> [ : e1 "-" e2 ]; Neg(e1) -> [ : "-" e1 ]; Zero() -> [ : "0" ]; Nilexprlist() -> [ : ]; Consexprlist(ex, Nilexprlist()) -> [ : ex ]; Consexprlist(ex, rest) -> [ : ex ", " rest ]; kimwitu-doc-10a+1/unparse_escaped_brackets_example.k0000444000114400011300000000023107076562213022604 0ustar piefelsimulantDivideby(e1, e2) -> [ : { if (eq_expr(e2, Zero()) } ${ e1 "/ /* <-- division by zero --> */" e2 $} { else } ${ e1 "/" e2 $} ]; kimwitu-doc-10a+1/unparse_views_example.k0000444000114400011300000000005307076562214020462 0ustar piefelsimulantPlus(e1 e2) -> [ view1 view2: e1 "+" e2 ]; kimwitu-doc-10a+1/unparse_view_declaration_example.k0000444000114400011300000000007107076562214022644 0ustar piefelsimulant%uview view1 view2; /* the `%' is part of the keyword */ kimwitu-doc-10a+1/printer_function_example.k0000444000114400011300000000021307076562214021156 0ustar piefelsimulantvoid printer(char *s, uview v) { printf("%s", s); } { /* example of usage */ unparse_exprlist(expression, printer, base_uview); } kimwitu-doc-10a+1/including_in_generated_files_example.k0000444000114400011300000000012207076562215023430 0ustar piefelsimulant%{ /* these brackets should be at the beginning of a line */ #include %} kimwitu-doc-10a+1/include_redirection_example.k0000444000114400011300000000012707076562215021605 0ustar piefelsimulant%{ KC_TYPES_HEADER /* Include this in k.h, and thus in every generated file. */ %} kimwitu-doc-10a+1/generated_data_type_example.k0000444000114400011300000000126107076562215021563 0ustar piefelsimulanttypedef enum { ..., sel_Neg = 4, sel_Minus = 5, sel_Plus = 6, sel_Zero = 7, sel_Nilexprlist = 8, sel_Consexprlist = 9, ... 
} kc_enum_operators; typedef struct kc_tag_expr *expr; /* note that a `expr' is a pointer to a `struct kc_tag_expr'*/ typedef struct kc_tag_exprlist *exprlist; struct kc_tag_expr { kc_enum_operators prod_sel; union { struct { expr expr_1; } Neg; struct { expr expr_1; expr expr_2; } Minus; struct { expr expr_1; expr expr_2; } Plus; } u; float value; /* an attribute */ }; struct kc_tag_exprlist { kc_enum_operators prod_sel; union { struct { expr expr_1; exprlist exprlist_1; } Consexprlist; } u; }; kimwitu-doc-10a+1/include_redirection_symbol_example.k0000444000114400011300000000004507076562215023171 0ustar piefelsimulant%{ KC_REWRITE #include "my_fns.h" %} kimwitu-doc-10a+1/yacc_abstract_syntax_example.k0000444000114400011300000000012007076562215021774 0ustar piefelsimulant/* Abstract syntax */ funnytree: Str(casestring) | Cons(funnytree funnytree) ; kimwitu-doc-10a+1/structure_file_abstract_syntax_example.k0000444000114400011300000000012007076562215024114 0ustar piefelsimulant/* Abstract syntax */ funnytree: Str(casestring) | Cons(funnytree funnytree) ; kimwitu-doc-10a+1/funny.k0000444000114400011300000000031407076562216015216 0ustar piefelsimulant/* A very simple tree structure */ funnytree: Str(casestring) | Cons(funnytree funnytree) ; int nroftips(funnytree $f) { Str: { return 1; } Cons(l, r): { return nroftips(l) + nroftips(r); } } kimwitu-doc-10a+1/unparse_control_sequences.k0000444000114400011300000000004107076562216021344 0ustar piefelsimulant"start%t%nlevel1%nlevel1%b%nend" kimwitu-doc-10a+1/unparse_output.k0000444000114400011300000000003207076562216017151 0ustar piefelsimulantstart level1 level1 end kimwitu-doc-10a+1/printer_function_with_indentation.k0000444000114400011300000000054507076562216023104 0ustar piefelsimulant#include #include "unpk.h" static indent=0; void printer(char *s, uview v) { char c; int j; while(c=*s++) { if (c!='%') putchar(c); else switch(c=*s++) { case 'b': indent--; break; case 't': indent++; break; case 'n': putchar('\n'); for (j=indent; j>0; j--) putchar('\t'); break; case '\0': return; default: putchar(c); }}} kimwitu-doc-10a+1/knuth1.k0000444000114400011300000000072207076562216015274 0ustar piefelsimulant/* From D. 
Knuth, Semantics of Context Free Languages */ /* The abstract syntax tree of fractional binary numbers, attributed */ number: Nonfraction(bitstring) | Fraction(bitstring bitstring) { float value; /* synthesized */} ; bitstring: Oneb(bit) | Moreb(bitstring bit) { float value; /* synthesized */ int length; /* synthesized */ int scale; /* inherited */ } ; bit: One() | Zero() { float value; /* synthesized */ int scale; /* inherited */ } ; kimwitu-doc-10a+1/knuth2.k0000444000114400011300000000140307076562217015273 0ustar piefelsimulant/* illustrating attribute evaluation without storing the attributes */ float eval_number_value(number $n) { Nonfraction(b): { return eval_bitstring_value(b,0); } Fraction(b1, b2): { return eval_bitstring_value(b1,0) + eval_bitstring_value(b2, -eval_bitstring_length(b2));} } float eval_bitstring_value(bitstring $bs, int scale) { Oneb(b): { return eval_bit_value(b, scale); } Moreb(bs_bs, bs_b): { return eval_bitstring_value(bs_bs,scale+1)+ eval_bit_value(bs_b, scale); } } int eval_bitstring_length(bitstring $bs) { Oneb: { return 1; } Moreb(bs_bs, *): { return eval_bitstring_length(bs_bs)+1; } } %{ #include %} float eval_bit_value(bit $b, int scale) { One: { return exp2((float)scale); } Zero: { return 0.0; } } kimwitu-doc-10a+1/knuth3.k0000444000114400011300000000201707076562217015276 0ustar piefelsimulant/* illustrating a multi-pass evaluation */ void pass1_number(number $n) { Nonfraction(b): { pass1_bitstring(b); } Fraction(b1, b2): { pass1_bitstring(b1); pass1_bitstring(b2); } } void pass1_bitstring(bitstring $b) { Oneb: { b->length=1;} Moreb(bs, *): { pass1_bitstring(bs); b->length=bs->length+1; } } /* pass1_bit omitted, it does nothing */ void pass2_number(number $n) { Nonfraction(b): { b->scale=0; pass2_bitstring(b); n->value= b->value; } Fraction(b1, b2): { b1->scale=0; b2->scale= -b2->length; pass2_bitstring(b1); pass2_bitstring(b2); n->value= b1->value+b2->value; } } void pass2_bitstring(bitstring $bs) { Oneb(b): { b->scale= bs->scale; pass2_bit(b); bs->value= b->value; } Moreb(b1, b2): { b2->scale= bs->scale; b1->scale= bs->scale+1; pass2_bitstring(b1); pass2_bit(b2); bs->value= b1->value + b2->value; } } void pass2_bit(bit $b) { One: { b->value= exp2((float)b->scale); } Zero: { b->value= 0.0; } } kimwitu-doc-10a+1/knuth4.k0000444000114400011300000000100307076562217015271 0ustar piefelsimulant/* the main program to call the evaluations */ void main() { number n; n = Fraction(Moreb(Moreb(Moreb(Oneb(One()), One()), Zero()), One()), Moreb(Oneb(Zero()), One())); /* 1101.01 */ printf(" %f \n", eval_number_value(n)); pass1_number(n); pass2_number(n); printf(" %f \n", n->value); n = Nonfraction(Moreb(Moreb(Moreb(Oneb(One()), One()), Zero()), One())); printf(" %f \n", eval_number_value(n)); /* 1101 */ pass1_number(n); pass2_number(n); printf(" %f \n", n->value); } kimwitu-doc-10a+1/nats_newer.k0000440000114400011300000000065307076562220016221 0ustar piefelsimulant/* the abstract data type of natural numbers */ nat: zero() | s(nat) | plus(nat nat) | mul(nat nat) | ack(nat nat) ; /* rewrite rules for addition, multiplication, and Ackermann's function */ plus(x, zero()) -> x; ack(zero(), x) -> s(x); plus(x, s(y)) -> s(plus(x, y)); ack(s(x), zero()) -> ack(x, s(zero())); mul(x, zero()) -> zero(); ack(s(x), s(y)) -> ack(x, ack(s(x),y)); mul(x, s(y)) -> plus(mul(x, y), x); kimwitu-doc-10a+1/ski.k0000444000114400011300000000041407076562220014641 0ustar piefelsimulant/* SKI combinator reduction */ %{ KC_REWRITE int cplus(); %} exp: S() | K() | I() | ap(exp exp) | num(int) 
| plus() ; ap(I(), x) -> x; ap(ap(K(), x), y) -> x; ap(ap(ap(S(), x), y), z) -> ap(ap(x, z), ap(y, z)); ap(ap(plus(), num(x)), num(y)) -> num(cplus(x, y)); kimwitu-doc-10a+1/fibonacci.k0000444000114400011300000000030107076562221015764 0ustar piefelsimulant/* Fibonacci */ %{ #include "rk.h" %} nat fib(nat $n) { zero(): { return s(zero()); } s(zero()): { return s(zero()); } s(s(x)): { return rewrite_nat( plus(fib(x), fib(s(x)))); } } kimwitu-doc-10a+1/fibonacci_memo.k0000444000114400011300000000065407076562221017014 0ustar piefelsimulant/* Fibonacci with memo function */ nat{uniq}: zero() | s(nat) | plus(nat nat) { nat fib = (nat)0; } ; /* rewrite rules omitted */ %{ #include "rk.h" %} nat fibm(nat n) { nat result; if (n->fib != (nat)0) return n->fib; with(n){ zero(): { result = s(zero()); } s(zero()): { result = s(zero()); } s(s(x)): { result = rewrite_nat( plus(fibm(x), fibm(s(x)))); } } n->fib = result; return result; } kimwitu-doc-10a+1/comsub.k0000444000114400011300000000131207076562221015342 0ustar piefelsimulant/* A very simple tree structure */ funnytree {uniq}: Str(casestring) | Cons(funnytree funnytree) { int ocs = 0; funnytree next ; { $0->next = alltrees; alltrees = $0; } } ; void occurs(funnytree $f) { Str: { f->ocs++; } Cons(f1, f2): { f->ocs++; occurs(f1); occurs(f2); } } %{ KC_TYPES_HEADER funnytree alltrees; %} void main() { funnytree ft, it; alltrees = (funnytree)0; ft = Str(mkcasestring("foo")); ft = Cons(ft, ft); ft = Cons(ft, Str(mkcasestring("bar"))); ft = Cons(ft, ft); it = alltrees; occurs(it); for(; it!= (funnytree)0; it= it->next) { if (it->ocs>1) { printf("occurs %d times:\n", it->ocs); print_funnytree(it); } } } kimwitu-doc-10a+1/printer_function_with_view_indentation.k0000444000114400011300000000055507076562222024134 0ustar piefelsimulant#include #include "unpk.h" void printer(char *s, uview v) { char c; int j; static indent=0; /* static here, or static at file level */ switch(v) { case v_left: indent--; break; case v_right: indent++; break; case v_nl: putchar('\n'); for (j=indent; j>0; j--) putchar('\t'); break; default: printf("%s", s); break; }} kimwitu-doc-10a+1/unparse_view_sequences.k0000444000114400011300000000011507076562223020636 0ustar piefelsimulant"start" "":v_right "":v_nl "level1" "":v_nl "level1" "":v_left "":v_nl "end" kimwitu-doc-10a+1/skil.l0000444000114400011300000000042707076562226015030 0ustar piefelsimulant/* lex input for numbers */ %{ #include "k.h" #include "y.tab.h" #include #include %} %% [0-9]+ { sscanf(yytext, "%d", &yylval.yt_int); return NUM;} [\t\n ] { ; } /* skip the white space */ . 
{ return (isupper(yytext[0])?tolower(yytext[0]):yytext[0]); } kimwitu-doc-10a+1/printer_function_with_control_view_indentation.k0000444000114400011300000000063707076562223025676 0ustar piefelsimulant#include #include "unpk.h" void printer(char *s, uview v) { char c; int j; static indent=0; /* static here, or static at file level */ switch(v) { case v_left: indent--; break; case v_right: indent++; break; default: while(c=*s++) { switch(c){ case '\n': putchar(c); for (j=indent; j>0; j--) putchar('\t'); break; default: putchar(c); break; }}}} kimwitu-doc-10a+1/unparse_control_view_sequences.k0000444000114400011300000000007007076562223022376 0ustar piefelsimulant"start" "":v_right "\nlevel1\nlevel1" "":v_left "\nend" kimwitu-doc-10a+1/rewrite_rule_general_example.k0000440000114400011300000000004307076562224021771 0ustar piefelsimulantNeg(x) -> <: Minus(Zero(), x) >; kimwitu-doc-10a+1/rewrite_views_example.k0000440000114400011300000000005707076562224020467 0ustar piefelsimulantNeg(x) -> < view1 view2: Minus(Zero(), x) >; kimwitu-doc-10a+1/unparse_rule_general.k0000440000114400011300000000011007076562224020245 0ustar piefelsimulantpattern1, pattern2, ... -> [ v1 v2 ... : ... ], ..., [ vn ... : ... ] ; kimwitu-doc-10a+1/rewrite_rule_general.k0000440000114400011300000000011007076562224020251 0ustar piefelsimulantpattern1, pattern2, ... -> < v1 v2 ... : ... >, ..., < vn ... : ... > ; kimwitu-doc-10a+1/function_definition_example_dollar.k0000440000114400011300000000015207076562225023160 0ustar piefelsimulantint len(exprlist $el) { Nilexprlist: { return 0; } tt = Consexprlist(*, t): { return len(t) + 1; } } kimwitu-doc-10a+1/function_definition_equiv.k0000440000114400011300000000056507076562225021331 0ustar piefelsimulantboolean equiv(expr $a, expr $b) { Add( asub ) & Add( bsub ), Subtract( asub ) & Subtract( bsub ), Const( * ) & Const( * ): { return equiv( asub, bsub ); } Plus( asub1, asub2 ) & Plus( bsub1, bsub2 ), Minus( asub1, asub2 ) & Minus( bsub1, bsub2 ): { return equiv( asub1, bsub1 ) && equiv( asub2, bsub2 ); } default: { return False; } } kimwitu-doc-10a+1/foreach_equiv.k0000440000114400011300000000064607076562225016703 0ustar piefelsimulantboolean equiv_lists(exprlist el1, exprlist el2) { boolean result = True; foreach( e1 & e2; exprlist el1, explist el2 ) { /* this body is executed as long as both lists have elements */ result = result && equiv( e1, e2 ); } /* we don't know if one list is longer than the other; we only */ /* know the 'result' for the elements that we compared with 'equiv' */ return result; } kimwitu-doc-10a+1/foreach_equiv_afterforeach.k0000440000114400011300000000102707076562225021406 0ustar piefelsimulantboolean equiv_lists(exprlist el1, exprlist el2) { boolean result = True; foreach( e1 & e2; exprlist el1, explist el2 ) { /* this body is executed as long as both lists have elements */ result = result && equiv( e1, e2 ); } afterforeach( $re1 & $re2 /* same number of items here as after foreach */ ) { Nilexprlist() & Nilexprlist() : { /* both lists same length: result unchanged */ } default: { /* lists have different length: result changed */ result = False; } } return result; } kimwitu-doc-10a+1/pattern_parameterized.k0000440000114400011300000000030007076562225020437 0ustar piefelsimulantboolean has_sub(expr $a, expr $b) { Add( asub ) & asub = * : { /* code here will be executed if 'a' has top-operator 'Add', and asub == b */ } default: { return False; } } kimwitu-doc-10a+1/pattern_equivalence.k0000440000114400011300000000025207076562225020112 0ustar piefelsimulantAdd( 
asub ) & Add ( bsub ) , Add( asub ) & Subtract ( bsub ) : { /* C-code */ } Add( asub ) & ( Add ( bsub ) , Subtract ( bsub ) ) : { /* C-code */ } kimwitu-doc-10a+1/foreach_equivalence.k0000440000114400011300000000200307076562226020041 0ustar piefelsimulant/* here we combine all kinds of items: patterns, dollar-prefixed variables * and ordinary variables. Of course, the items can appear in arbitrary order. */ foreach( pattern1 & ... & patternk & $dvar1 & ... & $dvarn & var1 & ... & varm ; ... ) { /* body, in which we can refer to * pattern variables, dollar-prefixed variables (dvar*) * and ordinary variables (var*) * but (most often) not to dollar-variables ($i, i >= 0) */ } /* here we have the same statement, in which we only use ordinary variables, * (and we introduced 'anonymous' variables for the patterns pat* ), * together with nested with statements. */ foreach( var_pat1 & ... & var_patk & dvar1 & ... & dvarn & var1 & ... & varm ; ... ) { with( var_pat1, ..., var_patk ) { pattern1 & ... & patternk : { with( dvar1, ..., dvarn ) { /* body, in which we can refer to * pattern variables, dollar-prefixed variables (dvar*) * and ordinary variables (var*) * but (most often) not to dollar-variables ($i, i >= 0) */ } } } } kimwitu-doc-10a+1/lex_input_example.l0000444000114400011300000000030707076562226017605 0ustar piefelsimulant/* Lexemes */ %{ #include "k.h" #include "y.tab.h" %} %% [a-zA-Z0-9]+ { yylval.yt_casestring = mkcasestring(yytext); return ID;} [\t\n ] { ; } /* skip the white space */ . { return yytext[0]; } kimwitu-doc-10a+1/rewrite_view_declaration_example.k0000440000114400011300000000007107076562226022647 0ustar piefelsimulant%rview view1 view2; /* the `%' is part of the keyword */ kimwitu-doc-10a+1/csgoutmain.c0000444000114400011300000000047007076562226016224 0ustar piefelsimulant/* an example of structure file i/o */ #include #include "csgiok.h" #include "k.h" void main() { char *io; funnytree ft; ft = Str(mkcasestring("foo")); ft = Cons(ft, ft); ft = Cons(ft, ft); io = CSGIOwrite_funnytree(stdout, ft); if (io != (char *)0) printf("%s\n", io); } kimwitu-doc-10a+1/csginmain.c0000444000114400011300000000040307076562227016020 0ustar piefelsimulant/* an example of structure file i/o */ #include #include "csgiok.h" #include "k.h" void main() { char *io; funnytree ft; io = CSGIOread_funnytree(stdin, &ft); if (io== (char *)0) print_funnytree(ft); else printf("%s\n", io); } kimwitu-doc-10a+1/natsmain.c0000444000114400011300000000022707076562227015666 0ustar piefelsimulant#include "k.h" #include "rk.h" nat n2, n3; void main() { n2 = s(s(zero())); n3 = s(n2); print_nat(rewrite_nat(ack(n3, s(n3)), base_rview)); } kimwitu-doc-10a+1/skimain.c0000444000114400011300000000033407076562227015506 0ustar piefelsimulant/* SKI expression reduction, main */ #include "k.h" #include "rk.h" extern exp x; void main() { yyparse(); print_exp(x); print_exp(rewrite_exp(x), base_rview); } int cplus(int i, int j) { return i+j; } kimwitu-doc-10a+1/yacc_input_example.y0000444000114400011300000000036507076562230017750 0ustar piefelsimulant/* Concrete syntax */ %{ #include "k.h" funnytree thetree; %} %token ID %type tree %% theroot: tree { thetree = $1;}; tree: ID { $$ = Str($1);} | '(' tree tree ')' { $$ = Cons($2, $3);} ; kimwitu-doc-10a+1/skiy.y0000444000114400011300000000073707076562230015061 0ustar piefelsimulant/* yacc input SKI expression */ %{ #include "k.h" exp x; yyerror(char *s) { printf("%s\n", s); exit(1); } %} %token NUM /* the next 2 lines force left associativity */ %left '(' 'i' 's' 'k' '+' NUM 
%left LEFT %type exp %% theroot: exp { x = $1;}; exp: '(' exp ')' { $$ = $2;} | exp exp %prec LEFT { $$ = ap($1, $2);} | 'i' { $$ = I();} | 's' { $$ = S();} | 'k' { $$ = K();} | NUM { $$ = num($1);} | '+' { $$ = plus();} ; kimwitu-doc-10a+1/kimwitu_syntax.y0000444000114400011300000000763607076562230017206 0ustar piefelsimulantspecification: { phylumdeclaration | includedeclaration | functiondeclaration | rwdeclaration | unparsedeclaration | rviewdeclaration | uviewdeclaration | storageclassdeclaration } ; phylumdeclaration: ID [storage_option] ':' [productionblock] [Ccode] ';' ; storage_option: '{' ['!'] ID '}' ; productionblock: 'list' ID | alternative_list ; alternative_list: [alternative_list '|'] ID '(' arguments ')' ; arguments: {ID} ; Ccode: '{' [Attributes] [Cbody] '}' ; Attributes: [Attributes] ID ID ['=' Cexpression] ';' ; Cexpression: /* arbitrary C expression without ';' and ',' with '$0' */ includedeclaration: '%{' {ID} includes '%}' ; /* the tokens are at the beginning of a line */ /* ID's in {ID} are at the same line as '%{' */ includes: /* arbitrary text */ ; rwdeclaration: outmostpatterns '->' rwclauses_or_term ';' ; rwclauses_or_term: {rewriteclause} | outmostterm ; rewriteclause: '<' rviewnames ':' outmostterm '>' ; rviewnames: {ID} ; patternchains: patternchain [',' patternchains] ; patternchain: patternchainitem ['&' patternchain] ; patternchainitem: outmostpattern | '(' patternchains ')' | '$' ID /* this rule is to be used only in 'patternchain' in foreach_statement */ ; outmostpatterns: outmostpattern [',' outmostpatterns] ; outmostpattern: [ID '='] ID ['(' [patterns] ')'] | '*' | 'default' ; patterns: pattern [',' patterns] ; pattern: ID '=' pattern | ID ['(' [patterns] ')'] | '"' /* any string of characters */ '"' | /* a number */ | '*' | 'default' ; outmostterm: ID '(' [terms] ')' ; term: ID ['(' [terms] ')'] | '"' /* any string of characters */ '"' | /* a number */ ; terms: term [',' terms] ; functiondeclaration: decl_specifiers fn_declarator {declaration} MainCbody ; decl_specifiers: [stor_class_specifier] [type_qualifier] ID ; stor_class_specifier: 'auto' | 'register' | 'static' | 'extern' | 'typedef' ; type_qualifier: 'const' | 'volatile' ; fn_declarator: [pointer] direct_fn_declarator ; direct_fn_declarator: ID '(' [fnarguments] ')' | ID '(' parameter_type_list ')' ; fnarguments: [fnarguments ','] ['$'] ID ; parameter_type_list: parameter_list [',' '...'] ; parameter_list: [parameter_list ','] parameter_decl ; parameter_decl: decl_specifiers declarator | decl_specifiers abstract_declarator ; declarator: [pointer] direct_declarator ; pointer: '*' {type_qualifier} [pointer] ; direct_declarator: ['$'] ID | '(' pointer direct_declarator ')' | direct_declarator '[' [Cexpression] ']' | direct_declarator '(' param_type_list ')' | direct_declarator '(' [fnarguments] ')' ; abstract_declarator: pointer | [pointer] direct_abstract_declarator ; direct_abstract_declarator: '(' abstract_declarator ')' | [direct_abstract_declarator] '[' [Cexpression] ']' | [direct_abstract_declarator] '(' [parameter_type_list] ')' ; declaration: decl_specifiers [declarator_list] ; declarator_list: [declarator_list ','] declarator ; MainCbody: Cbody | '{' with_clause '}' /* a with_clause if a fn argument has a $ prefix */ ; Cbody: '{' Ctext '}' ; Ctext: /* arbitrary C text, with $0 through $n, with_statement and foreach_statement */ ; foreach_statement: 'foreach' '(' patternchain ';' IDCexpressions ')' MainCbody [ 'afterforeach' '(' patternchain ')' MainCbodyinC ] ; IDCexpressions: ID 
Cexpression [',' IDCexpressions] ; with_statement: 'with' '(' Cexpressions ')' '{' with_clause '}' ; Cexpressions: Cexpression [',' Cexpressions] ; with_clause: patternchains ':' Cbody [with_clause] ; unparsedeclaration: outmostpatterns '->' {unparseclause} ';' ; unparseclause: '[' uviewnames ':' {unparseitem} ']' ; uviewnames: {ID} ; unparseitem: '"' /* any string of characters*/ '"' [':' ID] | ['(' ID ')'] ID {'->' ID} [':' ID] | Cbody | '${' {unparseitem} '$}' ; rviewdeclaration: '%rview' rviewnames ';' ; uviewdeclaration: '%uview' uviewnames ';' ; storageclassdeclaration: '%storageclass' {ID} ';' ; kimwitu-doc-10a+1/makefile.make0000444000114400011300000000162307076562230016317 0ustar piefelsimulant# /* Makefile for the term processor */ # /* 2 input .k-files plus yacc and lex usage. */ IT = example KFILES = file1.k file2.k YOURFILES = ${KFILES} ${IT}y.y ${IT}l.l ${IT}main.c ALLOBJS = k.o rk.o csgiok.o unpk.o\ ${KFILES:k=o} ${IT}y.o ${IT}l.o ${IT}main.o GENERATED_BY_KC = k.c rk.c csgiok.c unpk.c ${KFILES:k=c}\ k.h rk.h csgiok.h unpk.h ${KFILES:k=h} YFLAGS = -d ${IT}: ${ALLOBJS} ${CC} ${CFLAGS} ${ALLOBJS} -ll -o $@ ${GENERATED_BY_KC}: kctimestamp kctimestamp: ${KFILES} kc ${KFILES}; touch kctimestamp ${ALLOBJS}: k.h ${IT}main.o ${IT}l.o: x.tab.h ${IT}main.o ${KFILES:k=o}: ${KFILES:k=h} ${IT}main.o rk.o: rk.h ${IT}main.o csgiok.o: csgiok.h ${IT}main.o unpk.o: unpk.h # /* making copies to prevent unnecessary recompilation after yacc run */ x.tab.h: y.tab.h -cmp -s x.tab.h y.tab.h || cp y.tab.h x.tab.h # /* if you clean up, don't forget to remove the file kctimestamp */ kimwitu-doc-10a+1/patchlvl0000444000114400011300000000127107076562231015443 0ustar piefelsimulant%{ CODE HEADER /* * The Termprocessor Kimwitu * * Copyright (c) 1991 University of Twente, Dept TIOS. * All rights reserved. * */ %} /* This file contains the patchlevel of the manual. * $Id: patchlvl,v 1.5 2000/04/17 10:02:25 belinfan Rel $ * Because we use MANUALVERSION as symbolic RCS name, * MANUALVERSION is *not* allowed to contain dots: we use _ instead * very important: the version number should be the third word * on the #define line (because the makefile depends on it). * The #define MANUALVERSION line should be the last line that contains * the word MANUALVERSION in this file (because the makefile depends on it). */ %{ KC_TYPES_HEADER #define MANUALVERSION V10a %} kimwitu-doc-10a+1/Makefile0000444000114400011300000001242707076562231015350 0ustar piefelsimulant# /* # * The Termprocessor Kimwitu # * # * Copyright (c) 1991-1993 University of Twente, Dept TIOS. # * All rights reserved. 
# * # */ # ALLSOURCES = $(NONTEXSOURCES) $(SOURCES) SOURCES = $(PATCHLEVELFILE) Makefile \ tp.man.tex license.tex \ tp.man.bib \ ${LGRIND_K_SOURCES:k=tex} \ ${LGRIND_L_SOURCES:l=tex} \ ${LGRIND_C_SOURCES:c=tex} \ ${LGRIND_Y_SOURCES:y=tex} \ ${LGRIND_MAKE_SOURCES:make=tex} NONTEXSOURCES = \ ${LGRIND_K_SOURCES} \ ${LGRIND_L_SOURCES} \ ${LGRIND_C_SOURCES} \ ${LGRIND_Y_SOURCES} \ ${LGRIND_MAKE_SOURCES} LGRIND_K_SOURCES = \ phylum_example.k list_phylum_example.k attributed_phylum_example.k \ alternative_attribute_initialisation.k uniq_phylum_example.k \ symbol_table_example.k non_uniq_declaration_example.k \ storage_classes_example.k redefine_LARGEPRIME_example.k \ function_definition_example_newest.k \ foreach_statement_example.k foreach_with_statement_example.k \ foreach_pattern_statement_example.k rewrite_rule_example.k \ rewrite_rule_function_example.k unparse_definition_example.k \ unparse_escaped_brackets_example.k unparse_views_example.k \ unparse_view_declaration_example.k printer_function_example.k \ including_in_generated_files_example.k include_redirection_example.k \ generated_data_type_example.k include_redirection_symbol_example.k \ yacc_abstract_syntax_example.k \ structure_file_abstract_syntax_example.k \ funny.k unparse_control_sequences.k \ unparse_output.k printer_function_with_indentation.k knuth1.k \ knuth2.k knuth3.k knuth4.k nats_newer.k ski.k \ fibonacci.k fibonacci_memo.k comsub.k \ printer_function_with_view_indentation.k \ printer_function_with_control_view_indentation.k \ unparse_view_sequences.k \ unparse_control_view_sequences.k \ rewrite_rule_general_example.k \ rewrite_views_example.k \ unparse_rule_general.k \ rewrite_rule_general.k \ function_definition_example_dollar.k \ function_definition_equiv.k \ foreach_equiv.k \ foreach_equiv_afterforeach.k \ pattern_parameterized.k \ pattern_equivalence.k \ foreach_equivalence.k \ rewrite_view_declaration_example.k LGRIND_Y_SOURCES = yacc_input_example.y skiy.y kimwitu_syntax.y LGRIND_C_SOURCES = csgoutmain.c csginmain.c natsmain.c skimain.c LGRIND_L_SOURCES = skil.l lex_input_example.l LGRIND_MAKE_SOURCES = makefile.make all: tp.man.dvi pdf: tp.man.pdf ps: tp.man.ps tp.man.dvi.no-tgrind: tp.man.tex latex tp.man.tex latex tp.man.tex bibtex tp.man makeindex tp.man.idx latex tp.man.tex latex tp.man.tex tp.man.dvi: $(SOURCES) latex tp.man.tex latex tp.man.tex bibtex tp.man makeindex tp.man.idx latex tp.man.tex latex tp.man.tex fast.no-tgrind: tp.man.tex latex tp.man.tex fast: $(SOURCES) latex tp.man.tex clean: -rm -f tp.man.aux tp.man.dvi tp.man.ilg tp.man.log tp.man.toc \ tp.man.idx tp.man.ind allsources: @echo $(ALLSOURCES) update: $(SOURCES) # Apart from the sources, we also have to distribute a4wide.sty, and # lgrind.sty, since they are not standard. 
shar: shar a4wide.sty lgrind.sty $(ALLSOURCES) > tp.man.shar .SUFFIXES: .k .tex .make .dvi .ps .fig .eps .pstex_t .pdf .html THUMBPDF = perl5 /Text/LaTeX/bin/thumbpdf #THUMBPDF = thumbpdf ## to be able to generate the thumbnails we first generate a .ps file ## (using a recursive invocation of MAKE) ## because we generate the thumbnails from a ps file and not from a pdf ## file, because our old ghostscript does not understand pdf containing ## images ## We remove the .aux file to force at least two runs, to make sure ## that the .out file is made and used (otherwise, pdflatex may use the ## .aux file generated by latex (while Making $*.ps) and do only one ## 'run') .tex.pdf: $(MAKE) $*.ps -rm -f $*.aux $(THUMBPDF) $*.ps pdflatex '\scrollmode \input '"$*" while grep -s 'Rerun to get cross-references right' $*.log ; do \ echo bibtex $* ; \ pdflatex '\scrollmode \input '"$*" ; \ done .dvi.ps: if grep -s 'PRINT THIS DOCUMENT IN LANDSCAPE MODE' $*.log ; then \ dvips -t landscape $*.dvi -o $@ ; \ else \ dvips $*.dvi -o $@ ; \ fi .k.tex: lgrind -i -lk $< > $@ .y.tex: lgrind -i -ly $< > $@ .make.tex: #lgrind -i -lmake $< > $@ lgrind -i -lsh $< > $@ .l.tex: #lgrind -i -ll $< > $@ lgrind -i -lk $< > $@ .c.tex: lgrind -i -lc $< > $@ thesource: @echo $(ALLSOURCES) longsource: @ls -lrt $(ALLSOURCES) writablesource: @ls -lrt $(ALLSOURCES) | grep 'rw-' | awk '{print $$NF}' # # RCS stuff # PATCHLEVELFILE = patchlvl # because we use $(MANUALVERSION) as symbolic RCS name, # MANUALVERSION is *not* allowed to contain dots: we use _ instead MANUALVERSION = `grep MANUALVERSION $(PATCHLEVELFILE) | awk '{print $$3}' | tail -1` version: @echo $(MANUALVERSION) release: for f in $(ALLSOURCES); do \ rev=`rlog -h $${f} | grep '^head:' | awk '{printf("%s", $$2)}' ` ; \ echo "rcs -n$(MANUALVERSION):$${rev} -sRel:$${rev} $${f}" ; \ rcs -n$(MANUALVERSION):$${rev} -sRel:$${rev} $${f} ; \ co -r$${rev} $${f} ; \ done touch: for f in $(NONTEXSOURCES) $(SOURCES); do \ co $${f} ; \ done kimwitu-doc-10a+1/tp.man.tex0000444000114400011300000033516207076631734015640 0ustar piefelsimulant% Termprocessor user and reference manual, more or less % /* % * The Termprocessor Kimwitu % * % * Copyright (c) 1991-1996 University of Twente, Dept TIOS. % * All rights reserved. % * % */ \documentclass[twoside,titlepage]{article} % Specifies the document style. \usepackage{a4} \oddsidemargin 0.25 in \evensidemargin 0.25 in \marginparwidth 0.75 in \textwidth 5.875 in % Width of text line. \pagestyle{headings} \raggedbottom \usepackage{makeidx} \usepackage{lgrind} % settings for the lgrind style \def\BGfont{\rm} %"Background" font \def\CMfont{\it} %Comment font \def\KWfont{\bf} %Keyword font \def\STfont{\tt} %String font \def\VRfont{\sf} %Variable name font \def\LGfsize{\small} %Size to use in \lgrindfile %\LGinlinetrue %True for in-line code \LGnuminterval=0 % don't put numbers in the right margin \title{The Term Processor {\em Kimwitu}\\ Manual and Cookbook} % Declares the document's title. \author{Peter van Eijk \and Axel Belinfante} % author's name. 
\date{April 17, 2000} \makeindex \usepackage{thumbpdf} \usepackage{varioref} \usepackage{xr} % ,pagebackref,hyperfigures,colorlinks \usepackage[bookmarksnumbered,bookmarksopen,plainpages,backref]{hyperref} \hypersetup{ pdftitle={The Term Processor Kimwitu -- Manual and Cookbook}, pdfauthor={Peter van Eijk and Axel Belinfante}, pdfsubject={}, pdfkeywords={}, hyperindex=true } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} % End of preamble and beginning of text. %%\let\ctextfont=\sf \let\ccommentfont=\sl \let\cstringfont=\tt \pdfbookmark[1]{Title}{title} \maketitle % Produces the title. \pagestyle{empty} %\parskip 0.3cm \section*{License} \pdfbookmark[1]{License}{license} \input{license} \section*{Contact} \pdfbookmark[1]{Contact}{contact} Feedback about {\em Kimwitu} is welcome at: \begin{quote} Kimwitu Distribution\\ Formal Methods and Tools Group\\ Department of Computer Science\\ University of Twente\\ P.O. BOX 217\\ NL-7500 AE ENSCHEDE\\ THE NETHERLANDS\\ \\ email: \href{mailto:kimwitu@cs.utwente.nl}{$<$kimwitu@cs.utwente.nl$>$} \end{quote} More information about {\em Kimwitu} can be found on the web at: \begin{quote} \href{http://purl.oclc.org/net/kimwitu}{$<$http://purl.oclc.org/net/kimwitu$>$} \end{quote} \vfill \section*{About this document} \pdfbookmark[1]{About this document}{about} \begin{center} The Term Processor {\em Kimwitu} Manual and Cookbook, Version 10a.\\ This version is identical to Version 10, except for this page (the license text), a few bugfixes, \\ and a change from \LaTeX\ to \LaTeX2e, and the use of the hyperref package.\\ This manual documents {\em Kimwitu} version 4.5. \end{center} \newpage \pagestyle{headings} \pagenumbering{roman} \thispagestyle{plain} \pdfbookmark[1]{Contents}{contents} \tableofcontents %\newpage \parskip 0.3cm \mbox{} \vfill \newpage \pagenumbering{arabic} \setcounter{page}{1} \section*{Introduction} \begin{flushright} {\em When all else fails, read the manual. }\\ Anonymous. \end{flushright} In this document we describe a system that supports the construction of programs that use {\em trees} or {\em terms} as their main data structure. This system is a `meta'-tool in the development process of tools. Its {\em input} is an abstract description of terms, annotated with implementation directives, plus a description of functions on these terms. The {\em output} consists of a number of C-files that contain data structure definitions for the terms, a number of standard functions on those terms, and a translation (in C) of the function definitions in the input (e.g. term rewriting). The standard functions can be used to create terms, compare them for equality, read and write them on files in various formats and do manipulations like list concatenation. This document is organised as follows. In Section~\ref{sec:input} we describe the input format of the term processor\footnote{ There is no need to distinguish programming languages from their processors. Yet, calling our notation and tool a `term language' would allow unintended alternative interpretations. We see our notation and tool as only one of a class of term processors, it therefore has its own name. Colloquially we refer to it either as `the term processor' (being our only one) or as `Kimwitu' (pronounced `kee-mweetu', stress on the second syllable). }. In Section~\ref{sec:output} we describe the output of the system: the C types and functions, the files, etc. 
Section~\ref{sec:running} goes into the details of running the system, and the relation with other software development tools, such as the Unix tools {\sf make}, {\sf yacc}, {\sf lex}, {\sf lint} and the Synthesizer Generator (SG)\cite{csg:sigplan,csg:refman,csg:book}. \index{SG|see{Synthesizer Generator}} \index{Synthesizer Generator} Section~\ref{sec:cook} discusses various techniques in which programs can be written over terms, and thus gives examples of the concept of {\em multi-paradigm programming}. Finally, Section~\ref{sec:design} motivates some of the design decisions of our system. Some reading knowledge of the C programming language and associated tools is assumed. \section{Input} \label{sec:input} This section gives the input structure of {\em Kimwitu}. We describe how terms are defined, how attributes of terms are defined, how the storage strategy of terms can be specified, and how functions and rewrite rules on terms can be written. \subsection{Defining Terms} \label{input-terms} The input structure of terms is borrowed from the Synthesizer's Specification Language SSL. \index{SSL} An example is the following. \index{example!phylum}\index{phylum!example} \lgrindfile{phylum_example.tex} \noindent This declares two {\em phyla}\index{phylum}, or term types, or nonterminals, depending on your viewpoint. Each of these denotes a set of terms. As shown, there are two ways of constructing a phylum. One is by enumerating its variants, each of which is an {\em operator} \index{operator} applied to a list of phyla. It is possible to declare nullary operators, but it is not possible to define phyla that do not have operators. The other way is declaring it as a {\sf list} phylum. \index{list phylum} This is effectively equivalent to declaring the following right recursive phylum: \index{example!list phylum} \lgrindfile{list_phylum_example.tex} %lgrindupdate 16 -> 8 A list phylum therefore always has an empty list constructor, and a prefix operator. The advantage of a list declaration, apart from its brevity, is that it instructs the system to generate additional, list-specific, functions. {\sloppy There are a number of predefined phyla, among them are {\sf casestring} \index{casestring@{\sf casestring}} and {\sf nocasestring} \index{nocasestring@{\sf nocasestring}} for case-sensitive and case-insensitive character strings respectively. The full list of predefined phyla is in Section~\ref{gen-predef}. } Phyla can be defined more than once, and at each occurrence operators, attributes, and storage options (see below) can be added. For each phylum, the term processor generates a C data type (a record) with the same name. This is discussed in more detail in Section~\ref{gen-types}. \subsection{Attributes of Terms} \label{attr-terms-input} Phyla can be declared to have {\em attributes} \index{attributes} of a predeclared type. This type can be any C type, e.g. {\sf int}, or {\sf float}. Of course, it can also be a C type that is generated by {\em Kimwitu}. An example phylum with attributes is: \index{example!attributed phylum} \lgrindfile{attributed_phylum_example.tex} \noindent Here the attribute {\sf value} of type {\sf float} is defined, and initialised with {\sf 0}. \index{initialisation of attributes} Multiple attributes can be defined between the curly brackets. The initialisations are optional. The type of an attribute can also be a type generated by the term processor (such as a phylum). Attributes serve as a facility to decorate a tree with extra information. 
The decoration can be done in arbitrary user code. The attribute becomes a component of the record that is generated for the phylum. If {\sf x} is a value of type {\sf expr}, then the attribute can be referred to as {\sf x$\rightarrow$value}. As the last item of the initialisation a piece of arbitrary C, enclosed in curly brackets, is allowed. In this code the expression {\sf \$0} denotes the term that is being created. The code is executed after the term has been built completely, and the other initialisations have been performed. An alternative way of expressing the above initialisation therefore is: \lgrindfile{alternative_attribute_initialisation.tex} \subsection{Storage Options} \label{storage-options-input} The system (currently) provides two storage options, selectable on a per phylum basis. For both of them a C data type is generated for each phylum, together with a `create' function for each operator. In the default storage option each operator `application' just yields a new `memory cell' containing pointers to the arguments of the operator, with initialised attributes. The second storage option, called `uniq',\index{uniq storage option} is more interesting. It will guarantee that if the operator is once called with a certain set of arguments, each additional call with the {\em same} arguments will yield a pointer to the cell that was created by the first call. The result is that common (sub)trees are automatically shared. This technique is known as `hashed-consing'\index{hashed-consing} (because consing is the LISP function to create new cells, and hashing is used to implement this uniqueness of representation property). In this storage option attributes will be initialised only at the first call. Obviously, side effects on subterms can jeopardize this scheme: terms maintained under unique storage should not be modified (though their attributes may be modified because they do not contribute to the uniqueness). An example\index{example!uniq phylum} application is as follows. \lgrindfile{uniq_phylum_example.tex} %lgrindupdate 16 -> 8 Suppose that for each defining occurrence of an identifier a term is created with the attribute {\sf type} appropriately set, then to check the type one merely has to `create' it again, e.g. through {\sf Str(yourstring)} and look at the attribute. In the same way one can check if the identifier is already defined at a defining occurrence. A sketch of this code is as follows. It checks that an identifier is defined only once, and defined before its use. \index{example!symbol table}\index{symbol table!example} \lgrindfile{symbol_table_example.tex} \noindent Of course, this is not the most sophisticated example of a symbol table, but serves as an example. An essential condition on phyla definitions is that all constituent phyla of a `uniq' phylum are also `uniq'. The term processor warns at generation time about violations of this condition. As will be explained in Section~\ref{cook-ag}, in some cases a phylum that has inherited attributes should {\em not} be stored `uniq'. The following example\index{example!non-uniq phylum}\index{non-uniq phylum!example} declares ID as explicitly `non-uniq'. \lgrindfile{non_uniq_declaration_example.tex} \noindent %If both the `uniq' and the `non-uniq' storage option for a phylum have %been defined the termprocessor will report an error. Phyla may not be declared both {\sf \{uniq\}} and {\sf \{!uniq\}}. 
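
The sharing guaranteed by the `uniq' storage option can also be exploited
directly: because a second call of an operator with the same arguments yields
a pointer to the cell created by the first call, comparing pointers is a cheap
equality test for uniquely stored terms.
The following fragment is only a sketch of this, reusing the {\sf ID} phylum
and its {\sf Str} operator of the examples above.
\begin{verbatim}
/* sketch: ID is stored `uniq', as declared above */
ID a, b;

a = Str(mkcasestring("foo"));
b = Str(mkcasestring("foo"));   /* same arguments: no new cell is created */
if (a == b) {
    /* always true: both calls yield the same memory cell,
     * so the attributes (e.g. a->type) are shared as well */
}
\end{verbatim}
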
\subsection{Life Time of Terms} Terms maintained under non-unique storage can be freed using a {\sf free\_}{\em phylum} call. Terms maintained under unique storage cannot be freed individually. They are stored via hashtables, and their lifetime can only be controlled by manipulation of their hashtables. By default, Kimwitu stores all unique terms in the same hashtable. The following manipulations on hashtables are defined: creation, assignment, deletion and reuse. New, initially empty, hashtables of arbitrary size can be created and used instead of the default hashtable. A newly created hashtable is not used directly after creation, but only after it has been `assigned' - from that moment on it will be used instead of the previously assigned hashtable, until another hashtable gets assigned. A `de-assigned' hashtable is not freed; all its memory cells remain alive, so pointers to those cells remain valid. However, they are invisible to the create routines, and thus the unique storage guarantee no longer holds in the following sequence of events: an (uniquely maintained) operator is created with a certain set of arguments, and a memory cell for it is created in the hashtable, and the initialization code for its phylum is executed; a new hashtable is created and assigned; the same operator is called with the same set of arguments, a new memory cell for it is created in the new hashtable, and the initialization code for its phylum is executed again. A `de-assigned' hashtable can be freed when its memory cells are no longer needed. This can be used to control the lifetime of unique terms quite effectively, eg. to free intermediate results of a computation, as follows. First, a `temporary' hashtable is created to hold the intermediate results. Then, just before the computation is started, replace the default hashtable with the temporary one. When the computation is finished, the intermediate results and the final result are in the `temporary' hashtable. Re-assign the `old' hashtable, and copy the final result\footnote{Special care has to be taken of attributed terms, because the {\sf copy\_}{\em phylum} functions do at most copy the values of the attributes - if this value is a pointer to a term, make sure that this term is copied as well, if it is needed...}, to re-create the memory cells belonging to the final result in the `old' hashtable. Finally, free the `temporary' hashtable. Often only a limited number of phyla appears in the intermediate results. To allow more precise control, the phyla maintained under unique storage can be partitioned over multiple {\em storage-classes}, that each can have its own hashtable assigned. The memory cells created for a phylum are created in the hashtable assigned to the phylum's storage-class. Usually, phyla with a different `lifetime' will be in different storage classes. There is one predefined storage-class: {\em uniq}. Other storage-classes can be defined and used as follows:\index{storageclass declaration} \lgrindfile{storage_classes_example.tex} \noindent Here {\sf ID2} and {\sf ID3} belong to different storage-classes; if different hashtables are assigned to {\sf intermediate} and {\sf wholeprogram}, then the memory cells for {\sf Operator1} and {\sf Operator2} will be in another hashtable than those for {\sf Operator3}. A phylum can be in only {\em one} storage class. The term processor reports an error if this condition is violated. 
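
In code, the recipe given earlier for freeing intermediate results takes
roughly the following shape, here applied to the storage class
{\sf intermediate} of the example above.
The fragment is only an outline: the names {\sf new\_hashtable},
{\sf current\_hashtable}, {\sf assign\_hashtable} and {\sf free\_hashtable} are
placeholders for the generated hashtable routines, which are listed (together
with the {\sf copy\_}{\em phylum} routines) in Section~\ref{gen-func}.
\begin{verbatim}
/* outline only: the hashtable routine names below are placeholders,
 * the real names are listed in the section on generated functions */
tmp = new_hashtable( ... );             /* a temporary hashtable           */
old = current_hashtable(intermediate);  /* remember the assigned hashtable */
assign_hashtable(intermediate, tmp);    /* and use the temporary one       */
result = compute( ... );                /* intermediate terms go into tmp  */
assign_hashtable(intermediate, old);    /* re-assign the old hashtable     */
result = copy_ID2(result, ... );        /* re-create the result in `old'   */
free_hashtable(tmp);                    /* free the intermediate results   */
\end{verbatim}
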
The {\sf \%storageclass} declaration is optional; if {\em Kimwitu} input does contain a storage class declaration, errors will be reported for all storage classes that were not explicitly declared. For each storage class, Kimwitu creates statically (at compile time) a hashtable. %%%Statically, one hashtable is allocated and assigned to all storage classes. Dynamically, hashtables can be created, (re)assigned to one or more storage classes, cleared and freed. The hashtable functions are listed in Section~\ref{gen-func}. If the default (statically allocated) hashtable is not used, ie. if all storage classes are reassigned to dynamically created hashtables {\em before they are used}, it is useful to redefine {\sf LARGEPRIME}\index{LARGEPRIME@{\sf LARGEPRIME}} : \lgrindfile{redefine_LARGEPRIME_example.tex} \noindent or use the {\sf -DLARGEPRIME=0} flag with {\sf cc} during compilation of file {\sf k.c} (see Section~\ref{overviewGeneratedNames}), to avoid the creation of the statically allocated hashtable. %%%By default, terms created with storage option `uniq' cannot be freed. %%%In some cases performance of a program increases when `uniq' terms can be freed, %%%eg. when they implement a symbol table that is only used during a short %%%period of the program run, or when during a computation many intermediate terms %%%are created. %%%For more control, {\em Kimwitu} offers hooks to manipulate the implementation %%%(hashtables) of the `uniq' storage option. %%%Phyla with storage option `uniq' can be partitioned over multiple %%%{\em storage classes} - a storage class is a collection of phyla that have %%%storage option `uniq' and for which the storage is collectively managed. %%% %%%Storage classes are defined statically. %%%The default storage class is `uniq'. %%%New storage classes can be defined and used as follows: \subsection{Function Definitions} \label{func-def} The structure of the generated C data types (see Section~\ref{gen-types}) for the phyla is very regular. Nevertheless it appears tedious to write C functions over these data types. Therefore there is a mechanism that allows easier expression of functions over phyla. %Of course, procedures can be defined by having a void function result. In this way case analysis and subterm selection is simplified. For example: \index{example!function definition}\index{function definition!example} \lgrindfile{function_definition_example_dollar.tex} % lgrindupdate phydef 16-> 8 \noindent %First a phylum %{\sf lisplist} %is defined here. %On this phylum an integer-valued function %{\sf len} %is defined with one argument of type %{\sf lisplist.} Here an integer-valued function {\sf len} is defined with one argument of type {\sf exprlist} (for {\sf exprlist} see Section~\ref{input-terms}). %The C code of this function body consists of a {\em with-statement}, %which does pattern matching on its {\sf el} argument. The function does pattern matching on the argument that is prefixed with {\sf \$}. In the case where more than one pattern matches, the {\em most} specific match is taken. \index{overlapping patterns} The patterns can be arbitrary terms with variables, string-literals (double-quoted) and int-literals. Non-leaf variables can be denoted as {\em variable}{\sf =}{\em subpattern}, as {\sf tt} in the example above. The construct {\sf *} can be used to denote an `anonymous' variable. As degenerate pattern an operator name {\em not} followed by parentheses can be used when one is not interested in the (number of) subphyla. 
The {\sf Nilexprlist} pattern above is an example of such a pattern. The `pattern' {\sf default} can be used to indicate a default case. If there is no {\sf default}, the default action is to give a run-time error message. For each pattern a piece of C code is given between curly brackets. If several patterns share the same piece of C code, the patterns can be grouped (with separating commas). In this C code, pattern variables denote the various components of the term. The values {\sf \$1} etc. denote the subphyla of the term: in the example above {\sf len(t)} could also be written as {\sf len(\$1)}. The value {\sf \$0} denotes the term itself. Attributes can be referred to as e.g. {\sf variable$\rightarrow$value}. Alternatively, function bodies can be an arbitrary piece of C code. This code can contain {\em with-statements}\index{with-statements}, in which the same pattern matching can be expressed. For example, an alternative description of the function {\sf len} is: \index{example!function definition}\index{function definition!example}
\lgrindfile{function_definition_example_newest.tex} % lgrind -T 4
Another construct in function bodies and C code is the {\em foreach-statement}, which expresses the iteration over a list. Its components are the loop variable, which automatically gets the type of the list element, the list to loop over, and a body. Yet another example of the {\sf len} function:
\lgrindfile{foreach_statement_example.tex}
The {\em foreach-with-statement} is useful if the body of the {\sf foreach} loop consists of only a {\sf with-statement} for the loop variable. Then the same syntactical shorthand as for the function definitions can be used:
\lgrindfile{foreach_with_statement_example.tex}
The following {\em foreach-pattern-statement} is useful if there is only one interesting pattern. Instead of a loop variable it takes a pattern. The body is only executed for those list elements that match the pattern.
\lgrindfile{foreach_pattern_statement_example.tex}
It is also possible to do pattern matching over multiple expressions in one {\sf with-statement} and loop over several lists in a {\sf foreach-statement}. The syntax for it is a straightforward extension of the `singular' case. For example: \index{example!function definition}\index{function definition!example}
\lgrindfile{function_definition_equiv.tex}
\noindent Here we compare two {\sf expr} trees: if they have the same tree structure (form) we say that they are equivalent (and we don't care about the constant values in the leaves). For each dollar-prefixed argument we have a pattern; the patterns are grouped together with a separating ampersand ({\sf \&}). Pattern groups that share the same piece of C code are grouped together with separating commas. Pattern-variables may appear multiple times in the patterns of an ampersand-linked pattern group, to indicate that subtrees have to be (structurally) equivalent. We can even use that to `parameterize' a pattern: \index{example!pattern parameterizing}\index{pattern parameterizing!example}
\lgrindfile{pattern_parameterized.tex}
If comma-separated pattern groups share a common pattern or ampersand-linked pattern group, it can be factored out. For example, the two pattern groups below are equivalent: \index{example!pattern factoring}\index{pattern factoring!example}
\lgrindfile{pattern_equivalence.tex}
The {\sf foreach-statement} over multiple lists is actually a combination of the {\sf foreach-statement}, the {\sf foreach-with-statement} and the {\sf foreach-pattern-statement}.
It loops over all lists at the same time, as long as each list still contains elements. For example: \index{example!pattern factoring}\index{pattern factoring!example}
\lgrindfile{foreach_equiv.tex}
\noindent Here we have a function in which we check that the elements of two lists are pairwise equivalent. At the right of the semicolon two list expressions appear (separated by commas, and each prefixed with its type). At the left of the semicolon a corresponding number of {\sf foreach-items} appears (separated by ampersands), where each {\sf foreach-item} is either a variable (as in a {\sf foreach-statement}), a variable prefixed with a dollar (as in a {\sf foreach-with-statement}) or a pattern (as in a {\sf foreach-pattern-statement}). The body contains either C-code, or patterns with C-code (if one or more {\sf foreach-items} was a dollar-prefixed variable). Our {\sf equiv\_lists} function has one disadvantage: the body of the {\sf foreach} is executed for each pair of elements, as long as {\sf all} (both) lists are non-empty. So, to know whether the two lists have the same length, we would have to test the length explicitly. There is an experimental feature to get around this: it is possible to specify a clause that will be executed after the end of the foreach body, in which variables are bound to (or patterns are matched with) the remaining sub-lists:
\lgrindfile{foreach_equiv_afterforeach.tex}
\noindent The argument of the {\sf afterforeach} has the same number of items as the corresponding {\sf foreach}. The items can be patterns (over the {\sf list} type, not the {\sf list} element type), dollar-prefixed variables and ordinary variables. The body, which can contain C-code, or patterns (if there are dollar-prefixed variables), is executed exactly once. Actually, the use of patterns and dollar-prefixed variables is just syntactic sugar for a {\sf foreach-statement} with only variables, but with nested {\sf with-statements} in its body. For example, the two {\sf foreach-statements} below are equivalent: \index{example!pattern factoring}\index{pattern factoring!example}
\lgrindfile{foreach_equivalence.tex}
Most C code can contain so-called {\sf dollar-variables}\index{dollar-variables}: expressions of the form {\sf \$i} where {\sf i} is {\sf 0, 1, 2, ...}. The context in which the {\sf dollar-variables} are used defines whether {\sf dollar-variables} may be used, and to which values they are bound. The {\sf dollar-variables} may {\em not} be used in a {\sf with-statement} over more than one expression, or in {\sf foreach-statements} that induce such a {\sf with-statement}: {\sf foreach-statements} with more than one dollar-prefixed variable, and {\sf foreach-statements} without dollar-prefixed variables but with more than one pattern. The {\sf dollar-variable} {\sf \$0} is bound to one of the following: the {\sf C-expression} that is the argument of a {\sf with-statement}, the function- or foreach-parameter that is preceded by a {\sf \$}, the pattern of a {\sf foreach-pattern-statement}, or the phylum that is being created, when used in phylum-initialisation code. A {\sf dollar-variable} {\sf \$i}, {\sf i $>$ 0} is bound to the {\sf i}-th subterm of the enclosing {\sf with-}, {\sf foreach-with-} or {\sf foreach-pattern-statement}. Note that a {\sf foreach-statement} does {\em not} introduce {\sf dollar-variables}. In case of multiple candidates for bindings, the one from the smallest enclosing scope is chosen.
It is seldom necessary to use dollar-variables; in most cases pattern variables can be used instead. The only exception is {\sf \$0} used in phylum-initialisation code. We strongly advise the use of patterns and pattern-variables, instead of degenerate patterns and dollar-variables, if only because it allows the term processor to warn against inconsistencies in its input. For example, if the number of subphyla of an operator is changed, the term processor will report an error if a pattern was not updated accordingly; however, it will not notice when a dollar-variable was not updated to reflect the same change (lint may catch this, but there are cases that lint cannot catch). There is one use of dollar-variables that cannot be achieved in another way: in-place modification of terms.\index{terms!in-place modification} Try to avoid it, because it is quite easy to mess things up. Do not try to in-place modify terms maintained under unique storage. If you really need to in-place modify a term, you have the following options: In phylum initialization code (see Section~\ref{attr-terms-input}), an assignment to {\sf \$0} will modify the term under construction. In all other {\sf C texts} an assignment to {\sf \$0} is equivalent to a no-op. In those contexts, it {\em is} possible to assign to {\sf \$$i$} ($i > 0$), or to do something like: {\sf ... replacement\_term = ...; *\$0 = *replacement\_term; ... }, but this last solution may not be completely portable (it assigns a {\sf struct} to a dereferenced (*) {\sf struct pointer}).
\subsection{Rewrite Definitions}
Functional languages are a convenient formalism for expressing functions over trees. Another convenient formalism is formed by {\em rewrite rules}\cite{ehrig:act1}. For instance, if we have a certain equivalence over terms, then rewrite rules expressing this equivalence might define a procedure for computing a normal form of a term. Another use for term rewriting is as an alternative way of defining functions. For example, to implement the function `plus' on natural numbers one can define `plus' as an operator and specify the rewrite rules such that the normal form does not contain a plus. The result of normalising (term rewriting) then is that the function is `evaluated'. (This is all abstract data type theory.) The notation for term rewrite rules is very simple. For example: \index{example!rewrite rule}
\lgrindfile{rewrite_rule_general_example.tex}
\noindent In this example {\sf x} is a variable, used in the term in the right-hand side. The meaning of this example is that every occurrence of the operator {\sf Neg} is replaced by an equivalent construct. In general both sides are terms with variables, where all variables of the right-hand side also appear on the left-hand side. Actually, the left-hand side can be the same kind of pattern that is allowed in function definitions. To allow `greater control' (a euphemism for hacking...) we allow arbitrary C functions and string- and int-denotations in the right-hand side (see below). For the collection of rewrite rules, the system generates a function {\sf rewrite\_}{\em phylum} for each phylum, which has the normalised form as its result. This function can be called in the same way as any other function. The rewrite functions have an additional argument of type {\sf rview}. Rewrite views can be used to group rewrite rules in separate rewrite systems.
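A call of such a generated rewrite function is sketched below (a minimal sketch: the phylum {\sf expr} is the one used in the earlier examples, {\sf e} is a previously built term, and {\sf base\_rview} is the default view explained below).
\begin{verbatim}
#include "k.h"    /* generated phylum types */
#include "rk.h"   /* generated rewrite_phylum functions */

/* sketch: return the normal form of an expr term
 * with respect to the default rewrite view
 */
expr normalise(expr e)
{
    return rewrite_expr(e, base_rview);
}
\end{verbatim}
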
Rewrite rules can be made specific for a set of rewrite {\em views}\index{views!rewrite}\index{rewrite views} (or, added to one or more rewrite systems) by naming these views between the angle open bracket and the colon of a rewrite rule. For example:
\lgrindfile{rewrite_views_example.tex}
\noindent An omitted view list defaults to all rewrite views. If no views are specified, the angle brackets and colon may be omitted (as we usually do), as we show below for the rewrite rule for {\sf Neg(x)} that we gave above. \index{example!rewrite rule}
\lgrindfile{rewrite_rule_example.tex}
\noindent This is equivalent to a view list that only contains the view name {\sf base\_rview}. The type {\sf rview} is an enumeration type of all view names occurring in the rewrite rules. It always contains the view name {\sf base\_rview}. Rewrite views can be declared as in the following example:\index{rewrite view declaration}\index{view declaration!rewrite}
\lgrindfile{rewrite_view_declaration_example.tex}
\noindent Rewrite views should be declared. If the term processor input contains one or more rewrite view declarations, it will report errors for all rewrite views that are used and not declared. The use of view declarations may help to find typing errors in view names -- in term processor input that does not contain view declarations, the misspelling of a view name may just introduce a new view (with the misspelled name). If several rewrite rules share the same left-hand side, they may be combined by grouping the right-hand sides (with separating commas), provided that all the angle brackets haven't been omitted from the right-hand sides. If several rewrite rules share the same right-hand side, they may be combined by grouping the left-hand sides (with separating commas). The general form of a rewrite rule is shown below:
\lgrindfile{rewrite_rule_general.tex}
How we can prove that a rewrite system always yields a normal form is beyond the scope of this manual, but informally stated, each rule should bring the term `closer' to its normal form, and the order in which the rules are applied (the {\em rewrite strategy})\index{rewrite strategy} should not matter. Two simple warnings: never ever use a rule in which the right-hand side is equal to the left-hand side, or a rule that directly expresses a commutative property. The default rewrite strategy is leftmost innermost. In the case of overlapping patterns\index{overlapping patterns}, where more than one left-hand side matches, the {\em most} specific match is taken. Rewrite rules with functional right-hand sides are a bit harder to prove correct. The first requirement of course is that the arguments and the results of those functions should have the correct types, which {\sf lint} can check. The second requirement is that the rule is meaningful, but that is hard to check automatically. The writer of such rules should convince herself of the correctness of the rewrite rules. Here we give some considerations that can be used in doing so. The `purpose' of a rewrite system is to convert a term to its {\em normal form}. Now let us look closer at a rule with a function in it:
\lgrindfile{rewrite_rule_function_example.tex}
\noindent The rewrite system uses this rule when it has a subterm that matches the left-hand side, and `assumes' that substituting it by the right-hand side brings it closer to the normal form. It then continues applying rules on the new term. The function {\sf Function} should therefore bring the expression closer to the normal form.
Now what about the argument {\sf x} of {\sf Function}? It is not known whether or not it is in normal form already (although the default strategy, which is leftmost innermost, does guarantee this). Therefore, {\sf Function} cannot assume that its argument is in normal form. This is not usually a problem, but if it is, the function {\sf rewrite\_}{\em phylum} can be used in the function definition to guarantee this. The other side of the picture is that the {\em result} of {\sf Function} does not have to be in normal form, which is convenient for the writer of {\sf Function}. %(Future improvements: Add user-selectable %rewrite strategies, conditional rules, and make rewrite (selectively) a memo-function.) \subsection{Unparsing Definitions} \label{unp-def} Unparsing definitions describe textual representations of terms. The definitions describe a tree walk over terms by associating {\em unparsing rules} with patterns. For the collection of unparsing definitions {\em Kimwitu} generates a function {\sf unparse\_}{\em phylum} for each phylum. The patterns are the same patterns as can occur in rewrite definitions and with-statements. An unparsing rule contains a list of unparse {\em views}\index{views!unparse}\index{unparse views} and a list of {\em unparse items}. We'll discuss the views later. An unparse item defines an action for an {\sf unparse\_}{\em phylum} function and can be any of the following. \begin{description} \item[\tt "a string"] Each string denotation will be printed as such. \item[\sf \{ \ldots \}] Between curly brackets, arbitrary C code can be used, in which the pattern variables and {\sf \$0} (which is a reference to the entire left hand term) can be used. This C text is inserted in the unparse function. The C text can contain curly brackets, nested in matching pairs. If a non matching bracket needs to be included it can be escaped with a {\sf \$}, e.g. {\sf \$\{} and {\sf \$\}}. \item[\em variable] An occurrence of a pattern variable is translated into an invocation of the unparse function for the phylum of the pattern variable. \item[\em variable$\rightarrow$attribute] An occurrence of an attribute is translated into an invocation of the unparse function for the phylum or type of the attribute. An attribute of an attribute is denoted by {\em variable$\rightarrow$attribute$\rightarrow$attribute} etc. The unparsing function for a type that is not a phylum has to be provided by the user in a separate piece of C code. \item[\em (typename)variable] A prefixed cast has the effect that the unparse function for that type will be called for the variable (or its attribute). This is useful for the unparsing of non-pattern variables, eg. variables that are defined in C code. \item[\em variable:view] An appended unparse view name (see below) has the effect that the unparse function for the variable will be called with this view rather than the current view. This can also be done with attributes, casted variables and their attributes, and even strings. \item[\sf \$\{ {\em unparse items} \$\}] A list of unparse items enclosed by escaped bracket is translated into a curly open-bracket followed by the translation of the {\em unparse items} followed by a curly close-bracket. \end{description} How an unparse item is actually printed will be discussed later. 
First an example, containing strings and pattern variables.\index{example!unparse definition}
\lgrindfile{unparse_definition_example.tex} % lgrindupdate 32 -> 26
In the case of overlapping patterns\index{overlapping patterns}, the {\em most} specific match is preferred. This is used in the example for the introduction of list element separators; see the last line of the example. The number of separators is one less than the number of list elements. For each operator there is always a default pattern, in case none of the patterns match. The unparsing rule associated with this default pattern unparses all its subphyla. The use of the escaped brackets is illustrated in the example below.
\lgrindfile{unparse_escaped_brackets_example.tex}
The unparse functions have an additional argument of type {\sf uview}. Unparsing rules can be made specific for a set of unparse views by naming these views between the square open bracket and the colon of an unparsing rule. For example:
\lgrindfile{unparse_views_example.tex}
\noindent An omitted view list defaults to all unparse views. This is equivalent to a view list that only contains the view name {\sf base\_uview}. The type {\sf uview} is an enumeration type of all view names occurring in the unparsing rules. It always contains the view name {\sf base\_uview}. Views can be declared as in the following example:\index{unparse view declaration}\index{view declaration!unparse}
\lgrindfile{unparse_view_declaration_example.tex}
\noindent Unparse views should be declared. If the term processor input contains one or more unparse view declarations, it will report errors for all unparse views that are used and not declared. The use of view declarations may help to find typing errors in view names -- in term processor input that does not contain view declarations, the misspelling of a view name may just introduce a new view (with the misspelled name). If several unparse rules share the same left-hand side, they may be combined by grouping the right-hand sides (with separating commas). If several unparse rules share the same right-hand side, they may be combined by grouping the left-hand sides (with separating commas). The general form of an unparse rule is shown below:
\lgrindfile{unparse_rule_general.tex}
The {\sf unparse\_}{\em phylum} functions that are generated have three arguments: the term to be unparsed, a ({\sf void}) function, the {\em printer}, to be applied to each string (including string denotations of the predefined phyla) and an argument of type {\sf uview}, which is passed to the printer function. The C code in the unparsing rules can refer to the latter two arguments directly by the names {\sf kc\_printer} and {\sf kc\_current\_view} respectively. The user provides the printer function. The simplest example of such a function and its use is as follows. \index{example!printer function}\index{printer function!example}
\lgrindfile{printer_function_example.tex}
There are two features that are notably missing from unparsing rules. A feature for handling list element separators is not necessary, as is shown above. A feature for specifying indentation in the output can be emulated in the printer function. How to do that is explained in Section~\ref{cook-unp}.
\subsection{Including Other Definitions}
\label{incl-code}
As will be presented later, {\em Kimwitu} generates a number of {\sf .h} files, and a number of {\sf .c} files that include those {\sf .h} files.
It may be necessary to include in those files additional C code or definitions, for instance because a type is defined that is used in the attribute declarations, or a function from a system header, such as {\sf math.h}\index{math.h@{\sf math.h}}, is used in C code. The mechanism for this is similar to that of {\sf yacc}, and the syntax for it is illustrated in the following example. \index{example!including in generated files}\index{including in generated files!example}
\lgrindfile{including_in_generated_files_example.tex}
Inclusions like these can appear anywhere between declarations. They go to the beginning of the {\sf .c} file that is being generated from the {\sf .k} file in which this construct appears. In case it is necessary to make an inclusion in {\em another} file, the distinguished symbol\index{distinguished symbol} of that file can be used to redirect the inclusion, as in the following example. To make the same inclusion in several files, the redirection symbols of those files, separated by whitespace, can be given on the same line as the {\sf \%\{}.
\lgrindfile{include_redirection_example.tex}
% (?? this example is too vague/ general, include something with \#ifdef as well)
For further information consult Section~\ref{gen-file}.
\section{Output}
\label{sec:output}
{\em Kimwitu} generates a number of C files. They contain data types and functions on those data types. In this section we present the details of the term processor output.
\subsection{Generated Data Types}
\label{gen-types}
For each phylum a C data type is generated. Its name is the same as the phylum, so it can be used like any other type in a C program. Technically, it is a structure containing the attributes, a variant selector (cf. the operator) and a union of the alternatives. Note that this scheme allows the C type checker to check whether a term is constructed from the correct phyla. An additional data type is {\sf YYSTYPE}\index{YYSTYPE@{\sf YYSTYPE}}, which can be used in {\sf yacc}-generated\index{yacc@{\sf yacc}} parsers to construct terms. The generated C for the example in Section~\ref{input-terms} and Section~\ref{attr-terms-input} is given below. Note that it is rarely necessary to refer to these C structures directly, as function definitions are much more convenient. \index{example!generated data type}\index{generated data type!example}
\lgrindfile{generated_data_type_example.tex}
The term processor generates a C type definition {\sf boolean}\index{boolean@{\sf boolean}} with constants {\sf True}\index{True@{\sf True}} and {\sf False}\index{False@{\sf False}} that have the usual C interpretation (namely {\sf int}, 1, and 0). The names of all the rewrite and unparse views are collected in the enumeration types {\sf rview}\index{rview@{\sf rview}} and {\sf uview}\index{uview@{\sf uview}}, respectively. Whether or not the user has used them, these types contain the values {\sf base\_rview}\index{base\_rview@{\sf base\_rview}} and {\sf base\_uview}\index{base\_uview@{\sf base\_uview}}, respectively.
\item {\em result function}(); is the definition of a function found in the term processor input.
\end{itemize}
\fbox{\sf {\em phylum} {\em Operator}( {\em phylum\_sub1} sub1, ..., {\em phylum\_subn} subn );}
For each operator a C function is declared, with the same name. This function implements the storage option and returns (a pointer to) the term constructed from its arguments.
{\sf \fbox{\parbox{7cm}{ void *emalloc( kc\_size\_t n ); \\ void *ecalloc( kc\_size\_t n, kc\_size\_t m ); \\ void *erealloc( void *r, kc\_size\_t s ); \\ }}}\index{emalloc@{\sf emalloc}}\index{ecalloc@{\sf ecalloc}}\index{erealloc@{\sf erealloc}}
The {\sf emalloc} and friends functions are used for allocating memory; they call the system memory allocation routines and check the return status. The {\sf emalloc} and friends functions' interfaces use the {\sf void*} type\footnote{actually, for ease of portability to systems that don't have the {\sf void*} type it uses {\sf kc\_voidptr\_t}, which is typedef'ed to {\sf void*}. }, and hide from the user the fact that some {\sf malloc} libraries use the {\sf char*} type. The {\sf uniqmalloc} and friends routines are used to implement the hashtables' storage. They claim large chunks from the system and pass out small blocks. These blocks cannot be freed individually; it is only possible to free a complete collection of chunks as a whole. Each collection of chunks can be identified by its {\sf kc\_memory\_info\_t*}.
%\fbox{\sf kc\_memory\_info\_t *uniqmallocinit( kc\_size\_t n );}
\fbox{\sf kc\_memory\_info\_t *uniqmallocinit( kc\_size\_t n );} \index{uniqmallocinit@{\sf uniqmallocinit}}
Initialize the allocation information of a new set-of-chunks and return a pointer to it.
%{\sf n} is the size of chunks that should be claimed from the system.
%A default chunksize is used if ${\sf n} \leq 0$.
\fbox{\sf void *uniqmalloc( kc\_size\_t n, kc\_memory\_info\_t *mi );} \index{uniqmalloc@{\sf uniqmalloc}}
Returns a pointer to a block of size {\sf n} from the chunks administered in {\sf mi}.
\fbox{\sf boolean isinuniqmalloccedblock( void *p, kc\_memory\_info\_t *mi );} \index{isinuniqmalloccedblock@{\sf isinuniqmalloccedblock}}
Report whether or not {\sf p} was allocated via {\sf mi}.
\fbox{\sf void uniqfreeall( kc\_memory\_info\_t *mi ); } \index{uniqfreeall@{\sf uniqfreeall}}
Free all chunks allocated (managed) via {\sf mi}.
\fbox{\sf void uniqfreeelement( void *, kc\_memory\_info\_t *mi );} \index{uniqfreeelement@{\sf uniqfreeelement}}
Free one block allocated by {\sf uniqmalloc}. However, its current implementation is a no-op.
In the generated code these allocation routines are used via the following macros: {\sf MALLOC}\index{MALLOC@{\sf MALLOC}} and friends are used for all allocation, except for the allocation of memory by the {\em Operator} functions. {\sf UNIQMALLOC2}\index{UNIQMALLOC2@{\sf UNIQMALLOC2}} is used to allocate memory for the phyla that have the `uniq' storage option. {\sf NONUNIQMALLOC}\index{NONUNIQMALLOC@{\sf NONUNIQMALLOC}} is used for the remaining phyla. If you want different ones, redefine {\sf MALLOC} and friends in your included code. For example, the default definition of {\sf MALLOC} and {\sf NONUNIQMALLOC} is {\sf emalloc}, and the default definition of {\sf UNIQMALLOC2} is {\sf uniqmalloc}. {\sf kc\_size\_t}\index{kc_size_t@{\sf kc\_size\_t}} is by default typedef'ed to {\sf unsigned}.
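Such a redefinition could be included as sketched below. This is a hypothetical sketch: {\sf my\_malloc} and {\sf my\_calloc} are not part of Kimwitu but stand for user-supplied allocators with {\sf emalloc}/{\sf ecalloc}-like interfaces, and it is assumed that {\sf MALLOC} and friends are plain name macros, as their defaults suggest.
\begin{verbatim}
%{ KC_TYPES
/* hypothetical sketch: route the allocation macros of the generated
 * code to user-supplied allocators instead of emalloc and friends
 */
#define MALLOC        my_malloc
#define NONUNIQMALLOC my_malloc
#define CALLOC        my_calloc
%}
\end{verbatim}
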
The generated code contains the definition of the enumerated type {\sf kc\_storageclass\_t}\index{kc_storageclass_t@{\sf kc\_storageclass\_t}} , which contains all storage classes, including the predefined storage class {\sf uniq}. \fbox{\sf kc\_hashtable\_t kc\_ht\_assign( kc\_hashtable\_t ht, kc\_storageclass\_t sc );} \index{kc_ht_assign@{\sf kc\_ht\_assign}} Makes {\sf ht} the hashtable for storage class {\sf sc}, and return the previously assigned hashtable. \fbox{\sf kc\_hashtable\_t kc\_ht\_assigned( kc\_storageclass\_t sc );} \index{kc_ht_assigned@{\sf kc\_ht\_assigned}} Return the hashtable assigned to storage class {\sf sc}. \fbox{\sf kc\_hashtable\_t kc\_ht\_create\_simple( kc\_size\_t n );} \index{kc_ht_create_simple@{\sf kc\_ht\_create\_simple}} Creates a hashtable of size {\sf n}, using the {\sf kc\_ht\_create\_bucketmanagement} routine with default allocation routines ({\sf uniqmalloc} and friends). For the {\sf bucket\_alloc} routines {\sf ecalloc} and friends are used. {\sf \fbox{\parbox{10cm}{ kc\_hashtable\_t kc\_ht\_create( \\ \mbox{}\hspace{2cm} kc\_size\_t n, \\ %\mbox{}\hspace{2cm} void *(*uniq\_malloc\_init)( kc\_size\_t n ), \\ \mbox{}\hspace{2cm} void *(*uniq\_malloc\_init)(), \\ \mbox{}\hspace{2cm} void *(*uniq\_malloc)(kc\_size\_t n, void *mi), \\ \mbox{}\hspace{2cm} void (*uniq\_free\_element)(void *p, void *mi), \\ \mbox{}\hspace{2cm} void (*uniq\_free\_all)(void *p, void *mi), \\ \mbox{}\hspace{2cm} boolean (*in\_block)(void *p, void *mi) ); }}} \index{kc_ht_create@{\sf kc\_ht\_create}} Creates a hashtable of size {\sf n} with the given allocation routines. A {\sf 0} argument can be given for {\sf uniq\_free\_all}, {\sf uniq\_free\_element} and/or for {\sf in\_block}, if those routines are not implemented. This routine uses the {\sf kc\_ht\_create\_bucketmanagement} routine with default bucket allocation routines ({\sf ecalloc} and friends). %The {\sf uniq\_malloc\_init} routine will be called with a {\sf 0} argument. {\sf \fbox{\parbox{13cm}{ kc\_hashtable\_t kc\_ht\_create\_bucketmanagement( \\ \mbox{}\hspace{2cm} kc\_size\_t n, \\ %\mbox{}\hspace{2cm} void *(*uniq\_malloc\_init)( kc\_size\_t n ), \\ \mbox{}\hspace{2cm} void *(*uniq\_malloc\_init)(), \\ \mbox{}\hspace{2cm} void *(*uniq\_malloc)(kc\_size\_t n, void *mi), \\ \mbox{}\hspace{2cm} void (*uniq\_free\_element)(void *p, void *mi), \\ \mbox{}\hspace{2cm} void (*uniq\_free\_all)(void *p, void *mi), \\ \mbox{}\hspace{2cm} boolean (*in\_block)(void *p, void *mi), \\ \mbox{}\hspace{2cm} void (*uniq\_info)(void *mi), \\ \mbox{}\hspace{2cm} void *(*bucket\_alloc\_init)(), \\ \mbox{}\hspace{2cm} void *(*bucket\_calloc)(kc\_size\_t n, kc\_size\_t s, void *bi), \\ \mbox{}\hspace{2cm} void *(*bucket\_realloc)(void *b, kc\_size\_t old, kc\_size\_t new, void *bi), \\ \mbox{}\hspace{2cm} void (*bucket\_free)(void *b, kc\_size\_t s, void *bi), \\ \mbox{}\hspace{2cm} void (*bucket\_free\_all)(void *bi), \\ \mbox{}\hspace{2cm} int bucket\_increment, \\ \mbox{}\hspace{2cm} void (*bucket\_info)(void *mi) ); }}} \index{kc_ht_create_bucketmanagement@{\sf kc\_ht\_create\_bucketmanagement}} Creates a hashtable of size {\sf n} with the given allocation routines. A {\sf 0} argument can be given for {\sf uniq\_free\_all}, {\sf uniq\_free\_element} and/or for {\sf in\_block}, if those routines are not implemented, as well as for the info routines, and either the {\sf bucket\_free} or the {\sf bucket\_free\_all} routines. %The {\sf uniq\_malloc\_init} routine will be called with a {\sf 0} argument. 
\fbox{\sf void kc\_ht\_clear( kc\_hashtable\_t ht);} \index{kc_ht_clear@{\sf kc\_ht\_clear}}
Clears hashtable {\sf ht}, ie. frees its elements (either using {\sf uniq\_free\_elements} or, preferably, {\sf uniq\_free\_all}), and its buckets. The result of this routine should be equivalent to calling {\sf kc\_ht\_delete} followed by {\sf kc\_ht\_create}.
\fbox{\sf void kc\_ht\_delete( kc\_hashtable\_t ht); } \index{kc_ht_delete@{\sf kc\_ht\_delete}}
First {\sf kc\_ht\_clear}s the hashtable, and then frees the hashtable itself.
\fbox{\sf void kc\_ht\_reuse( kc\_hashtable\_t ht);} \index{kc_ht_reuse@{\sf kc\_ht\_reuse}}
Prepares hashtable {\sf ht} for reuse, ie. frees its elements, but not its buckets.
\fbox{\parbox{7cm}{ {\sf casestring mkcasestring( char *s ); \\ nocasestring mknocasestring( char *s ); }}}\index{mkcasestring@{\sf mkcasestring}}\index{mknocasestring@{\sf mknocasestring}}
The basic phylum constructors. They convert a C string value into the corresponding phylum value. From a {\sf casestring} or {\sf nocasestring} value the string can be referenced as {\sf cs-$>$name}. In the case of case-insensitive strings, this field will contain the string as it is capitalized at its first occurrence.
\fbox{\sf {\em result} {\em function}( {\em args} );}
The template of user-provided functions.
\fbox{\sf boolean eq\_{\em phylum}( {\em phylum} p1, {\em phylum} p2 );} \index{eq_phylum@{\sf eq\_{\em phylum}}}
The function that tests for structural equality of terms. Attribute values do not influence the comparison. If the phylum is uniquely represented, the test always takes constant time. Note that the comparison of a {\sf casestring} with a {\sf nocasestring} is not type correct.
\fbox{\sf void free\_{\em phylum}( {\em phylum} p, boolean recursive);} \index{free_phylum@{\sf free\_{\em phylum}}}
The node {\sf p} is zeroed and freed. If {\sf recursive} is {\sf True}, the subterms are freed as well. The body of a free function for a phylum with storage class {\sf uniq} is empty.
\fbox{\sf void freelist\_{\em list}( {\em list} p );} \index{free_list@{\sf free\_{\em list}}}
Free the list, but not the elements of the list. In other words, free the spine.
\fbox{\sf {\em list} concat\_{\em list}( {\em list} l1, {\em list} l2);} \index{concat_list@{\sf concat\_{\em list}}}
Produce a new list that is the concatenation of the two arguments. If the second argument is the empty list, and the list phylum is not uniquely represented, this is equivalent to a copy of the list nodes (the `spine'). The list elements are never copied.
\fbox{\sf {\em phylum} copy\_{\em phylum}( {\em phylum} p, boolean copy\_attributes );} \index{copy_{\em phylum}@{\sf copy\_{\em phylum}}}
Return a copy of {\sf p}. If {\sf copy\_attributes} is {\sf False} attribute values are initialised as defined in the definition of {\em phylum}. If {\sf copy\_attributes} is {\sf True} the values of the attributes are copied. Please note that this does not imply that the attributes themselves are copied. This function is ineffective on uniquely represented phyla.
\fbox{\sf {\em list} reverse\_{\em list}( {\em list} l);} \index{reverse_list@{\sf reverse\_{\em list}}}
Produce a list that is the reverse of the argument list.
\fbox{\sf int length\_{\em list}( {\em list} l);} \index{length_list@{\sf length\_{\em list}}}
Yield the number of elements in a list; a nil list has zero elements.
\fbox{\sf {\em phylum} last\_{\em list}( {\em list} l);} \index{last_list@{\sf last\_{\em list}}} Yield the last element of a list, or an error if there isn't one. % actually it will then return a null pointer \fbox{\sf {\em list} map\_{\em list}( {\em list} l, {\em phylum}(*f)({\em phylum}));} \index{map_list@{\sf map\_{\em list}}} Yield the list constructed from the element-wise application of {\sf f}. It `lifts' {\sf f} to a function on lists. \fbox{\sf {\em list} filter\_{\em list}( {\em list} l, boolean(*f)({\em phylum}));} \index{filter_list@{\sf filter\_{\em list}}} Yield the list of elements of which {\sf f} is {\sf True}. \fbox{\parbox{8cm}{ {\sf void fprint\_{\em phylum}( FILE *f, {\em phylum} p ); \\ void print\_{\em phylum}( {\em phylum} p ); }}} \index{print_phylum@{\sf print\_{\em phylum}}} \index{fprint_phylum@{\sf fprint\_{\em phylum}}} Print the argument term on a given file ({\sf f = 0} means standard output -- {\sf print\_{\em phylum}( {\em p} )} is identical to {\sf fprint\_{\em phylum}( 0, {\em p} )}) in a canonical format. In this format each operator appears on a separate line. It is mainly intended for debugging. {\sf \fbox{\parbox{10cm}{ void fprintdot\_{\em phylum}( \\ \mbox{}\hspace{2cm} FILE *f, \\ \mbox{}\hspace{2cm} {\em phylum} p, \\ \mbox{}\hspace{2cm} char *root\_label\_prefix, \\ \mbox{}\hspace{2cm} char *edge\_label\_prefix, \\ \mbox{}\hspace{2cm} char *edge\_attributes, \\ \mbox{}\hspace{2cm} boolean print\_node\_labels, \\ \mbox{}\hspace{2cm} boolean use\_context\_when\_sharing\_leaves, \\ \mbox{}\hspace{2cm} boolean print\_prologue\_and\_epilogue ); \\ void fprintdotprologue( FILE *f ); \\ void fprintdotepilogue( FILE *f ); }}} \index{fprintdot_phylum@{\sf fprintdot\_{\em phylum}}} \index{fprintdotprologue@{\sf fprintdotprologue}} \index{fprintdotepilogue@{\sf fprintdotepilogue}} Print the argument term {\sf p}, with its attributes, on a given file ({\sf f = 0} means standard output) in {\em dot} input format. {\em Dot} is a program that draws directed acyclic graphs in various output formats, among which are postscript and gif. It is available as part of the {\em GraphViz} graph visualisation package\cite{graphviz}. If {\sf print\_node\_labels} is {\sf True}, the nodes are labeled with the names of the operators, the types of the subtrees, and the attribute names and types (or values, for boolean attributes), and the root node is labeled with {\sf root\_label\_prefix} followed by {\em phylum}. The edges are numbered; the edge numbers show a depth-first left-to-right treewalk. The numbers are prefixed with the contents of {\sf edge\_label\_prefix}, which can make it easier to see to which tree an edge belongs when several trees are combined in the same picture, as discussed below. Attributes of the edges (colors, fonts, etc.) can be given with {\sf edge\_attributes}. Shared non-leaf tree nodes appear as such in the drawing. For leaf nodes (like integers, (no)casestrings) the amount of sharing can be influenced with the {\sf use\_context\_when\_sharing\_leaves} argument: if it is {\sf False} leaf nodes with the same value will be shared (which resembles the internal program structure, but may give pictures that are a bit confusing, especially if no non-leaf nodes are shared), if it is {\sf True} then the ancestor of the leaf node is taken into account for the sharing, i.e. leaf nodes with the same value but different ancestors will not be shared. Finally, it is possible to combine several trees in one drawing. 
To do so, start by invoking {\sf fprintdotprologue}. Next, invoke {\sf fprintdot\_{\em phylum}} for each tree, with argument {\sf print\_prologue\_and\_epilogue} set to {\sf False}. You likely want to use a different {\sf edge\_label\_prefix} for each tree. Finally, invoke {\sf fprintdotepilogue}. If only one tree is to be drawn, then it is sufficient to invoke {\sf fprintdot\_{\em phylum}} with {\sf print\_prologue\_and\_epilogue} set to {\sf True} (and {\sf fprintdotprologue} and {\sf fprintdotepilogue} need not be invoked 'by hand'). \fbox{\sf int kc\_set\_fprintdot\_hashtablesize( int s );} \index{kc_set_fprintdot_hashtablesize@{\sf kc\_set\_fprintdot\_hashtablesize}} Set the size of the hashtable that the {\sf fprintdot\_{\em phylum}} routines use to detect node-sharing. Return the previous value. \fbox{\sf {\em phylum} rewrite\_{\em phylum}( {\em phylum} p, rview v );} \index{rewrite_phylum@{\sf rewrite\_{\em phylum}}} Yield the normal form of the first argument with respect to the rewrite system and view. \fbox{\sf void unparse\_{\em phylum}( {\em phylum} p, void(*kc\_printer)(char *s, uview v), uview kc\_current\_view );} \index{unparse_phylum@{\sf unparse\_{\em phylum}}} Unparses the argument {\sf p} and its descendants, with view and printer as given. \fbox{\parbox{8cm}{ {\sf char *CSGIOread\_{\em phylum}( FILE *f, {\em phylum} *ptr ); \\ char *CSGIOwrite\_{\em phylum}( FILE *f, {\em phylum} p ); }}} \index{CSGIOread_phylum@{\sf CSGIOread\_{\em phylum}}} \index{CSGIOwrite_phylum@{\sf CSGIOwrite\_{\em phylum}}} To read and write a term from a structure file (see Section~\ref{sg-io}). {\sf FILE} is the standard file i/o type defined in {\sf $<$stdio.h$>$}. The functions return a null pointer if all went well. If not, they return a pointer to a string containing an error message. \fbox{\sf int kc\_set\_csgio\_hashtablesize( int s );} \index{kc_set_csgio_hashtablesize@{\sf kc\_set\_csgio\_hashtablesize}} Set the size of the hashtable that the {\sf CSGIOwrite\_{\em phylum}} routines use to detect node-sharing. Return the previous value. \fbox{\sf boolean kc\_set\_csgio\_sharing( boolean b );} \index{kc_set_csgio_sharing@{\sf kc\_set\_csgio\_sharing}} Disable ({\sf b = False}) or re-enable the use of node-sharing while {\em writing} structure files. This does not affect the ability to {\em read} structure files that contain node-sharing! By default (if this routine is not called) node-sharing in structure files is enabled. Return the previous value. \fbox{\sf void kc\_print\_operator\_statistics( FILE *f );} \index{kc_print_operator_statistics@{\sf kc\_print\_operator\_statistics}} Print for each operator its size in bytes, the number of create calls, the number of actually created elements (usually much lower if its phylum has the {\sf uniq} storage option), the number of free calls (recursive and non-recursive), the number of freed elements, the number of remaining nodes and the amount of space that these take, in bytes. The routine is equivalent to a no-op, unless the preprocessor symbol {\sf KC\_STATISTICS}\index{KC_STATISTICS@{\sf KC\_STATISTICS}} is defined. \fbox{\sf void kc\_print\_hash\_statistics( FILE *f );} \index{kc_print_hash_statistics@{\sf kc\_print\_hash\_statistics}} Print the number of hashtable buckets that contain zero, one, two, three, four, five, six, seven, eight or more elements. 
\fbox{\sf void kc\_print\_hashtable\_memory\_statistics( FILE *f, kc\_hashtable\_t h );} \index{kc_print_hashtable_memory_statistics@{\sf kc\_print\_hashtable\_memory\_statistics}}
Print information about the memory management of {\sf h}. (This will only be possible if node- and bucket memory-management routines have been specified.)
\subsection{Predefined Phyla and Operators}
\label{gen-predef}
The following phyla and operators are predefined. They cannot be redefined by the user's input. These predefined phyla have the property of unique representation. The predefined operators are only used in conjunction with the Synthesizer Generator; see Section~\ref{sg-io}. \index{int@{\sf int}} \index{casestring@{\sf casestring}} \index{nocasestring@{\sf nocasestring}}
\begin{sf}
\begin{tabular}{ll}
\rm Phyla & \rm Operators \\ \hline
int & \_Int {\em mapped directly to C} \\
casestring & \_Str \\
nocasestring & NoCaseStr
\end{tabular}
\end{sf}
The operators mentioned here should not be used. Instead see the description of the functions {\sf mkcasestring} and {\sf mknocasestring} in Section~\ref{gen-func}, which create values of these phyla.
\subsection{File Names, Preprocessor Symbols and Redirection}
\label{gen-file}
The system generates at least 5 {\sf .c} files and 5 {\sf .h} files from its input. In particular, for each input {\em file}{\sf .k}, a {\em file}{\sf .c} and a {\em file}{\sf .h} is generated that contain the functions defined in that input file. If there is no file argument, {\em Kimwitu} reads from standard input and {\em file} then takes the value {\sf stdin}. The names of the generated files and their include structure are given in the following table. Each {\sf .c} file also defines a distinguished symbol.\index{distinguished symbol}
\begin{sf}
\begin{tabular}{lll}
\rm File & \rm Includes & \rm Distinguished Symbol \\ \hline
k.h & & KC\_TYPES\_HEADER \\
k.c & k.h & KC\_TYPES \\
{\em file}.h & & KC\_FUNCTIONS\_{\em file}\_HEADER \\
{\em file}.c & k.h {\em file}.h & KC\_FUNCTIONS\_{\em file} \\
rk.c & k.h rk.h & KC\_REWRITE \\
unpk.c & k.h unpk.h & KC\_UNPARSE \\
csgiok.c & k.h csgiok.h & KC\_CSGIO
\end{tabular}
\end{sf}
The distinguished symbols can also be used as redirection symbols, as in:
\lgrindfile{include_redirection_symbol_example.tex}
Additionally one can use the shorthands from the following table.
\begin{sf}
\begin{tabular}{ll}
\rm Redirection Symbol & \rm File \\ \hline
HEADER & {\em file}.h \\
CODE & {\em file}.c \\
{\em /*empty*/} & {\em file}.c
\end{tabular}
\end{sf}
The use of distinguished symbols is further illustrated in Section~\ref{incl-code}.
\subsection{Overview of Generated Names}
\label{overviewGeneratedNames}
This section contains a limited overview of the generated names. In the names, the `meta' notation of Section~\ref{gen-func} is used, with the following extension:
\begin{itemize}
\item {\em file} is the basename of the input file that corresponds to the generated item (e.g. a function).
\end{itemize}
As is indicated in the table, some of the macros can be redefined to cater for specific applications.
\vspace{1cm} \small % hier moeten de kolommen nog een eigen font krijgen, kol 1,2 sf \begin{tabular}{|@{ ~~\sf}l@{ ~\sf}lll|} \hline \rm Name & File & Type & See also Section\\ \hline KC\_TYPES\_HEADER & k.h & macro & \ref{gen-file}\\ KC\_TYPES & k.c & macro & \ref{gen-file}\\ LARGEPRIME & k.c & redefinable macro & \ref{storage-options-input}\\ UNIQMALLOC2 & k.c & redefinable macro & \ref{gen-func}\\ NONUNIQMALLOC & k.c & redefinable macro & \ref{gen-func}\\ MALLOC & k.c, csgiok.c & redefinable macro & \ref{gen-func}\\ CALLOC & k.c, csgiok.c & redefinable macro & \ref{gen-func}\\ REALLOC & k.c, csgiok.c & redefinable macro & \ref{gen-func}\\ UNIQFREE & k.c & redefinable macro & \ref{gen-func}\\ NONUNIQFREE & k.c & redefinable macro & \ref{gen-func}\\ FREE & k.c, csgiok.c & redefinable macro & \ref{gen-func}\\ uniqmalloc & k.[ch] & {\sf void *} function & \ref{gen-func}\\ emalloc & k.[ch] & {\sf void *} function & \ref{gen-func}\\ ecalloc & k.[ch] & {\sf void *} function & \ref{gen-func}\\ erealloc & k.[ch] & {\sf void *} function & \ref{gen-func}\\ kc\_ht\_create\_bucketmanagement & k.[ch] & {\sf kc\_hashtable\_t} function & \ref{gen-func}\\ kc\_ht\_create & k.[ch] & {\sf kc\_hashtable\_t} function & \ref{gen-func}\\ kc\_ht\_create\_simple & k.[ch] & {\sf kc\_hashtable\_t} function & \ref{gen-func}\\ kc\_ht\_assign & k.[ch] & {\sf kc\_hashtable\_t} function & \ref{gen-func}\\ kc\_ht\_assigned & k.[ch] & {\sf kc\_hashtable\_t} function & \ref{gen-func}\\ kc\_ht\_clear & k.[ch] & {\sf void} function & \ref{gen-func}\\ kc\_ht\_delete & k.[ch] & {\sf void} function & \ref{gen-func}\\ kc\_ht\_reuse & k.[ch] & {\sf void} function & \ref{gen-func}\\ &&& \\ mkcasestring & k.[ch] & {\sf casestring} function & \ref{gen-func}\\ mknocasestring & k.[ch] & {\sf nocasestring} function & \ref{gen-func}\\ {\em Operator} & k.[ch] & {\em phylum} function & \ref{gen-func}\\ eq\_{\em phylum} & k.[ch] & {\sf boolean} function & \ref{gen-func}\\ fprint\_{\em phylum} & k.[ch] & {\sf void} function & \ref{gen-func}\\ print\_{\em phylum} & k.[ch] & {\sf void} function & \ref{gen-func}\\ fprintdot\_{\em phylum} & k.[ch] & {\sf void} function & \ref{gen-func}\\ fprintdotprologue & k.[ch] & {\sf void} function & \ref{gen-func}\\ fprintdotepilogue & k.[ch] & {\sf void} function & \ref{gen-func}\\ kc\_set\_fprintdot\_hashtablesize & k.[ch] & {\sf int} function & \ref{gen-func}\\ free\_{\em phylum} & k.[ch] & {\sf void} function & \ref{gen-func}\\ copy\_{\em phylum} & k.[ch] & {\em phylum} function & \ref{gen-func}\\ concat\_{\em list} & k.[ch] & {\em list} function & \ref{gen-func}\\ reverse\_{\em list} & k.[ch] & {\em list} function & \ref{gen-func}\\ length\_{\em list} & k.[ch] & {\sf int} function & \ref{gen-func}\\ last\_{\em list} & k.[ch] & {\em phylum} function & \ref{gen-func}\\ map\_{\em list} & k.[ch] & {\em list} function & \ref{gen-func}\\ filter\_{\em list} & k.[ch] & {\em list} function & \ref{gen-func}\\ &&& \\ {\em phylum} & k.h & datatype & \ref{gen-types}\\ Cons{\em list} & k.h & {\em Operator} & \ref{input-terms}\\ Nil{\em list} & k.h & {\em Operator} & \ref{input-terms}\\ sel\_{\em Operator} & k.h & enumeration constant & \ref{gen-types}\\ YYSTYPE & k.h & datatype & \ref{gen-types}, \ref{yl-io}\\ yt\_{\em phylum} & k.h & union selector for Yacc & \ref{gen-types}, \ref{yl-io}\\ kc\_size\_t & k.h & datatype & \ref{gen-func}\\ &&& \\ KC\_FUNCTIONS\_{\em file}& {\em file}.c & macro & \ref{gen-file}\\ {\em function} & {\em file}.[ch] & {\em result} function &\\ \hline \end{tabular} \normalsize 
\vspace{1cm} \small % hier moeten de kolommen nog een eigen font krijgen, kol 1,2 sf \begin{tabular}{|@{ ~~\sf}l@{ ~\sf}lll|} \hline \rm Name & File & Type & See also Section\\ \hline KC\_REWRITE & rk.c & macro & \ref{gen-file}\\ rewrite\_{\em phylum} & rk.[ch] & {\em phylum} function & \ref{gen-func}\\ rview & rk.h & datatype & \ref{gen-types}\\ &&& \\ KC\_UNPARSE & unpk.c & macro & \ref{gen-file}\\ unparse\_{\em phylum} & unpk.[ch] & {\sf void} function & \ref{gen-func}, \ref{unp-def}\\ uview & unpk.h & datatype & \ref{gen-types}\\ &&& \\ KC\_CSGIO & csgiok.c & macro & \ref{gen-file}\\ CSGIOread\_{\em phylum} & csgiok.[ch] & {\sf char*} function & \ref{gen-func}, \ref{sg-io}\\ CSGIOwrite\_{\em phylum}& csgiok.[ch] & {\sf char*} function & \ref{gen-func}, \ref{sg-io}\\ kc\_set\_csgio\_hashtablesize& csgiok.[ch] & {\sf int} function & \ref{gen-func}\\ kc\_set\_csgio\_sharing& csgiok.[ch] & {\sf boolean} function & \ref{gen-func}\\ \hline \end{tabular} \normalsize \subsection{Debugging Support} The generated code is organised in such a way that a symbolic debugger will be a useful tool for exploring data structures. Furthermore, by default the code is instrumented with {\sf assert}\index{assert@{\sf assert}} statements that check e.g. for references through nil pointers. In the same way as the Unix {\sf assert}, compiling with -DNDEBUG\index{DEBUG}\index{NDEBUG} effectively deletes the asserts from the code. (There are other macros whose redefinition can influence the behaviour of {\sf assert}.) A typical run time error message is `no default action defined'. This occurs when a {\sf with-} or {\sf foreach-with-statement} does not cover all cases, as explained in Section~\ref{func-def}. One of the situations in which this can occur is when such a statement is applied to an argument of the wrong phylum. Running {\sf lint} should have uncovered this, see Section~\ref{sec:lint}. \section{Running It} \label{sec:running} In the previous sections we presented the input and output of the term processor. This section discusses some aspects of using the system in a software-development process. In particular, we discuss the relation with other tools (such as {\sf yacc}) that are useful for this kind of software. \subsection{The Program and the Files} {\em Kimwitu} is invoked by, for example, the command {\sf kc file.k}. It then generates a number of files. Multiple {\sf file.k} arguments are permitted. To avoid superfluous recompilations when using {\sf make}, {\sf kc} will not overwrite a file with an identical file. See the discussion below (Section~\ref{makefile}) for an example makefile. The program {\sf kc} can give some error messages and warnings. Depending on the user-provided code, {\sf cc} and {\sf lint} can give additional error messages. In particular, {\sf lint} will typecheck all expressions involving phyla and operators. \subsection{A Makefile} \label{makefile} For the sake of convenience, we give here a typical {\sf makefile} for use with the term processor, {\sf yacc} and {\sf lex}. This example illustrates a naming convention: the input files are called {\sf file1.k} and {\sf file2.k}, all user-provided C code is in the file {\sf examplemain.c}, the {\sf yacc} input is in {\sf exampley.y}, and the {\sf lex} input is in {\sf examplel.l}. Note that the makefile makes extensive use of {\sf make}'s defaults, and that it attempts to avoid superfluous recompilations. \index{example!makefile} \lgrindfile{makefile.tex} \noindent This makefile is rather complicated, for the following reason. 
The target ({\sf \$\{IT\}}) depends on a number of object files, which depend on a number of {\sf .c} and {\sf .h} files, some of which depend on the {\sf .k} files. However, if {\sf example.k} is changed the term processor does not always change all {\sf .c} and {\sf .h} files, and we want to avoid recompilations of unchanged files. For this reason, an intermediate target {\sf kctimestamp} is introduced that `remembers' when {\sf kc} was last executed successfully. The last rule (for {\sf x.tab.h}) helps to avoid recompilations when {\sf yacc} overwrites the file {\sf y.tab.h} but doesn't change it. There is still a problem with this makefile. It assumes that if {\sf kctimestamp} is up to date the generated files are also accurate. The user can mess this up. How to adapt this to your situation? If you have a different number of {\sf .k} files, you will have to adapt the definition of the {\sf KFILES} macro. If you do not use {\sf yacc} you can remove the lines containing {\sf x.tab.h} (2 lines) and the filenames {\sf \$\{IT\}y.[yo]}. If you do not use {\sf lex} you can remove the filenames {\sf \$\{IT\}l.[lo]} and the loader option {\sf -ll}. In either case it is sufficient to remove the object file names from the definition of the macro {\sf ALLOBJS}. This is also true for other files that need not be included in the final program. For example, if no rewriting functions are used you can delete {\sf rk.o} from {\sf ALLOBJS} and it will not be compiled. \subsection{Using {\sf lint}} \label{sec:lint} Some static errors can be discovered by running {\sf lint} on the C files, whether they are written by the user or generated by {\em Kimwitu}. Most notably this will catch argument type mismatches and such. The most convenient way is to use the option {\sf -u} to suppress messages on functions not used. \subsection{Interfacing with Yacc and Lex} \label{yl-io} The first piece of advice is: try to avoid it, because the SG (SSL) structure file format is much more convenient. Notwithstanding that, it is fairly simple, and the term processor handles a lot of the machinery involved, such as defining a data type for the {\sf yacc} stack. The two important considerations in using {\sf yacc} are the difference between abstract syntax and concrete syntax, and handling terminal symbols with values, such as identifiers. Abstract syntax should be the input of the term processor and concrete syntax goes into {\sf yacc}. First a simple abstract syntax. \lgrindfile{yacc_abstract_syntax_example.tex} % lgrindupdate 16 -> 8 In the {\sf yacc} input file, concrete syntax is translated into abstract syntax using the {\em operator} functions. Note also the type annotation of concrete symbols, as in {\sf \%token $<$yt\_casestring$>$ ID}. This type is a union selector of the generated {\sf YYSTYPE}. \index{example!yacc input}\index{yacc input!example} \lgrindfile{yacc_input_example.tex} % lgrindupdate 16 -> 8, 40->28 The typical way of using {\sf lex}\index{lex@{\sf lex}} is to have the lexical analyser generate tokens for the parser and build values of the phylum {\sf casestring}. The following is the complete {\sf lex} input. 
\index{example!lex input}\index{lex input!example} \lgrindfile{lex_input_example.tex} \subsection{Interfacing with Structure Files and the Synthesizer Generator} \label{sg-io} The functions {\sf CSGIOread\_{\em phylum}} and {\sf CSGIOwrite\_{\em phylum}} read and write structure files that are fully compatible with the structure files produced by Synthesizer Generator editors\footnote{This is the, unattributed, ASCII SSL V3 format, which is the structure file format from SG version 2 onwards.}. The correspondence between the primitive phyla is indicated in the following table. \begin{tabular}{@{ \sf}c@{\hspace{0.5cm}\sf}c} \rm SSL primitive phylum & Kimwitu primitive phylum\\ \hline INT & \sf int \\ STR & \sf casestring \end{tabular} To convert an SSL abstract syntax to a Kimwitu abstract syntax, one has to replace the occurrences of primitive phyla. It is convenient, but not essential, to also change SSL list phyla into Kimwitu list phyla, which can involve a renaming of SSL operators. Since structure files encode a term directly on a file, a concrete syntax is not necessary. They have a number of properties that make them particularly suitable for interfacing separate programs, generated by either the SG or Kimwitu. These programs do not have to be generated from exactly the same term description in order to be able to communicate through structure files. In the following we call the structure file format (format for short) {\em insensitive} for such differences in the term description if this is the case. The format is insensitive for attributes because attribute values do not appear in the file. The format is insensitive for renaming of phyla, but not for renaming of operators. It is also sensitive for changes in the definition of operators such as adding or deleting component phyla. The format is insensitive for the definition or existence of operators that do not appear in the term to be written. This is essential because a tool would want to have `local' phyla definitions. The format is also insensitive for storage options. Whether or not a term is shared internally is dependent only on the definition of the phyla at the reader program. Below we give an example of usage of structure files. The abstract syntax is as follows: \index{example!structure file usage} % identical \lgrindfile{structure_file_abstract_syntax_example.tex} \lgrindfile{yacc_abstract_syntax_example.tex} % lgrindupdate 16 -> 8 A structure file on this phylum could be written with the following code. \lgrindfile{csgoutmain.tex} The reading of such a structure file then looks like this. \lgrindfile{csginmain.tex} \section{Cookbook} \label{sec:cook} The term processor {\em Kimwitu} supports a number of styles of programming functions over trees. Such styles are sometimes called {\em paradigms}\index{paradigms}, and we can therefore say that the term processor supports {\em multi-paradigm programming}. In this section we try to substantiate this claim by presenting some examples. Each of these examples highlights a particular paradigm, some also show how paradigms can be mixed. The most fundamental paradigm of course is that of trees, and in particular trees in which nodes of various types appear. This structure appears in a number of ways in computer science. {\em Parse tree} nodes correspond to the terminal and non-terminal symbols of a context-free language, or more precisely, production instances of those symbols. 
Normally an abstraction of parse trees is used, in which irrelevant terminal symbols and non-terminal symbols are eliminated. Such trees are usually called {\em abstract syntax trees}. In abstract data type theory, trees denote {\em expressions} and a normal form of an expression denotes its {\em value} in a certain sense. Trees can also represent a prescription of the computation of a value, or more generally, a {\em program}. The reason for mixing paradigms is that we want to exploit the strong points of each paradigm, while at the same time avoiding their weak points. Examples of such weak points are the following. Attribute grammars allow only attributes to be computed over trees, where the computation cannot have a circular dependency. Functional programming languages do not allow side effects to be expressed. Abstract data type rewrite systems can only rewrite terms to their normal form. Conventional programming languages, such as C, usually force the programmer to be fairly aware of the representation of data types.

\subsection{Structural Induction}
Perhaps the most straightforward style of computing a value over a tree is by {\em structural induction}. The result of a tree is computed as a function of the results of its subtrees. The simplest example of such a function is equality of trees. Two trees are equal if the top nodes have the same structure and all the corresponding subtrees are equal. This function is so common that it is always generated by the term processor. Only slightly less trivial is the function to compute the number of leaves of a tree. An example of that function follows. The base case of {\sf nroftips} simply returns 1, and the structural induction case just adds the results of the components.
\index{example!structural induction}
\lgrindfile{funny.tex} % lgrindupdate phydef 16 -> 8 ???

\noindent
In general one writes a function for every phylum that occurs in the tree.

\subsection{Unparsing}
\label{cook-unp}
Unparsing is in some ways a special case of structural induction. An example of an unparsing definition has been given in Section~\ref{unp-def}. In this subsection we show how a printer function that implements indentation can look. The idea is to define special control sequences to indicate the increment or decrement of indentation. We have chosen these control sequences in accordance with the SSL style. For example, the string
\lgrindfile{unparse_control_sequences.tex}

\noindent
should be printed as follows.
\lgrindfile{unparse_output.tex}
An example of a printer function to do that is the following:
\index{example!printer function}\index{printer function!example}
\lgrindfile{printer_function_with_indentation.tex}
The approach above has an advantage that is at the same time a disadvantage. The {\sf printer} function also prints {\sf casestring} and {\sf nocasestring} terms, which has the advantage that it is possible to dynamically adapt the indentation, simply by assigning an unparsing control sequence to a {\sf casestring} or {\sf nocasestring} variable before it is unparsed. The disadvantage is that one has to be careful that {\sf casestring} and {\sf nocasestring} terms do not contain unexpected unparsing control sequences - it may be necessary to quote the `escape character' (the character \% in the example above) in {\sf casestring} and {\sf nocasestring} terms, unless the `escape character' does not normally appear in them.
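A minimal sketch of such quoting (this helper is not part of the code generated by {\em Kimwitu}, and the name {\sf quote\_escapes} is made up here) is the following C fragment, which doubles every \% in a string; with the {\sf printer} function shown above, a doubled \% falls through to the {\sf default:} case and is therefore printed literally.
\begin{verbatim}
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper, not generated by Kimwitu: double every '%' in s,
 * so that the printer function above prints it literally.
 * The caller is responsible for freeing the returned buffer. */
char *quote_escapes(const char *s)
{
    char *quoted = (char *)malloc(2 * strlen(s) + 1); /* worst case: all '%' */
    char *p = quoted;

    if (quoted == NULL)
        return NULL;
    for (; *s != '\0'; s++) {
        if (*s == '%')
            *p++ = '%';        /* emit the extra, quoting '%' */
        *p++ = *s;
    }
    *p = '\0';
    return quoted;
}
\end{verbatim}
\noindent
The quoted text can then be turned into a {\sf casestring} with, e.g., {\sf mkcasestring(quote\_escapes(text))} before it is put into the tree, for text that comes from outside and may contain the escape character.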
The following alternative does not have this problem - but neither does it have the advantage of `dynamic' control sequences. The idea is to use unparse {\sf views} instead of control sequences - remember that the {\sf printer} function was passed both a string and a view argument. Instead of a single string, a number of strings are unparsed, some of which have a {\em :view} postfix - these strings are only meant to invoke the printer function with a `control view'. To reduce the number of printer calls, the `control views' can be combined with the `normal' strings. This is left as an exercise for the reader.
%\lgrindfile{unparse_view_sequences.tex}
%The adapted printer function looks as follows:
%\lgrindfile{printer_function_with_view_indentation.tex}
%
In the following example the printer function recognizes newline characters ('$\backslash$n'):
\lgrindfile{unparse_control_view_sequences.tex}
The printer function:
\lgrindfile{printer_function_with_control_view_indentation.tex}
If, instead of newline characters, a newline view were used (e.g. {\sf "":v\_nl}), the printer function could be simplified even further: the {\sf case '$\backslash$n':} code in the {\sf switch(c)} can then be placed as {\sf case v\_nl:} in the {\sf switch(v)}, and the code for the {\sf default:} case in that {\sf switch(v)} can be replaced by a simple {\sf printf("\%s",s); }.

\subsection{Attribute Grammars}
\label{cook-ag}
Structural induction is a special case of attribute grammars, where there is only one pass, computing only synthesized attributes. This is not the place to give an overview of attribute grammars. Suffice it to say that each node, or term, is decorated with a number of {\em attributes}, of which the value is computed from the values of the subterms of the node (synthesized attributes) or from the encompassing node (inherited attributes). An evaluation scheme walks a tree a number of times to compute all the attributes. Two schemes are demonstrated here. The example is from the original paper on attribute grammars\cite{knuth-ag}, and computes the value of a fractional binary number, e.g. {\sf 1101.01}. First, the abstract syntax.
\index{example!attribute grammar}
\lgrindfile{knuth1.tex} % lgrindupdate 16 -> 10, 32 ->24

The first example of an evaluation scheme derives from the observation that each phylum occurrence can be viewed as a set of functions, each of which computes a synthesized attribute from the inherited attributes. So, there is a set of functions {\sf eval\_}{\em phylum\_synthesized\_attr}. Each rule for a synthesized attribute corresponds to a case in one of these functions, and each rule for an inherited attribute appears as a parameter of one of the calls to these functions. The function invocation structure is isomorphic to the attribute dependency graph. This scheme can also be characterised as a {\em demand-driven} scheme. There are at least two problems with this approach. First, an inherited attribute of a phylum can be dependent on a synthesized attribute of that phylum. For example, {\sf bitstring\_scale} depends on {\sf bitstring\_length}, and the computation of {\sf bitstring\_length} can therefore not have the {\sf scale} as an argument. An analysis of the attribute dependencies is necessary to prune the argument lists of the functions. Second, as each used occurrence of a synthesized attribute is represented as a call to the corresponding function, attributes may be evaluated more than once. This is of course the other side of not storing results in the tree.
\lgrindfile{knuth2.tex}
The second example of an evaluation scheme visits the tree a number of times and computes at each visit of a node all the attributes that can be computed. In the implementation there are procedures called {\sf pass}{\em number\_phylum}. In our example there are two passes. In the first pass the attribute {\sf length} is computed, and in the second pass the other attributes. Again, this scheme has its disadvantages. First, the allocation of attributes to passes has to be derived from an analysis of the attribute dependencies. Second, in comparison with the previous scheme, this one represents the opposite time/space trade-off. No attribute is evaluated more than once, but at the expense of storing all intermediate results. Finally, this scheme does not coexist very well with unique storage\index{uniq phylum!inherited attributes}\index{non-uniq phylum!inherited attributes} of phyla that have inherited attributes. Two occurrences of a phylum cannot be shared if they have different inherited attributes.
\lgrindfile{knuth3.tex}
For the sake of completeness, here is the main program that calls the attribute evaluations.
\lgrindfile{knuth4.tex}
The examples illustrate that the term processor does not prescribe a particular evaluation scheme. The advantage is that schemes can be mixed at liberty, and can even be combined with non-attribute-grammar paradigms. The disadvantage is of course that the evaluation order, e.g. in pass allocation, has to be determined manually, or by using some other tool. Conceivably, a system could be made to generate term processor input from an attribute grammar.

\subsection{Abstract Data Types and Rewrite Systems}
The following example illustrates an abstract data type (ADT) style of programming functions. The data type defined here is the type of natural numbers. In ADT theory there is usually no difference between {\em constructors}, which make up a term in normal form, and {\em functions}, which can be applied to terms. The difference between these two is only a property of the rewrite system. In the phylum, both of them are operators.
\index{example!abstract data type}
% \lgrindfile{nats.tex}
\lgrindfile{nats_newer.tex} % lgrind -T 4; lgrindupdate 16->13, 40->36, 60->52
Here is the program to call the function. We build a term corresponding to the application of {\sf ack}, and then rewrite this into normal form.
\lgrindfile{natsmain.tex}

\subsection{Rewrite Systems and Functions}
Combinators are a technique for implementing functional languages. This example, combinator reduction\index{combinator reduction}, illustrates the use of function calls in the right-hand side of an equation, as well as a few small {\sf yacc} and {\sf lex} techniques. The abstract syntax and the rewrite rules are as follows.
\index{example!abstract data type}\index{abstract data type!example}
\lgrindfile{ski.tex}
Note that the operator {\sf num} refers to a built-in type {\sf int}, which the term processor maps to C. In the last rewrite rule, a C function {\sf cplus} is called on values of this type. This function is defined in the main program, as follows.
\lgrindfile{skimain.tex}
In the {\sf yacc} input some of the more mundane details of forcing the `right' associativity are illustrated; note the lines that start with {\sf \%left}.
\index{example!yacc input}\index{yacc input!example}
\lgrindfile{skiy.tex}
Finally, the minimal {\sf lex} input, which shows how to read and convert an integer properly, and how to make the keywords case insensitive.
\index{example!lex input}\index{lex input!example}
\lgrindfile{skil.tex}
The program that is generated from this reads and reduces combinator expressions. For example, the input {\sf s~k~i~1}, which is really the identity operation applied to {\sf 1}, yields {\sf num(1)}. The input {\sf s(s(k+)i)(k1)8}, which illustrates the increment operation, yields {\sf num(9)}.

\subsection{Memo Functions}
Memo functions remember (`memorize') their results. If called again with the same arguments, they will return the remembered value. Memo functions are functional in their behaviour: a subsequent call with the same argument will yield the same result. In their performance they are not functional: the subsequent call will not need recomputation. Memo functions of course constitute a time/space trade-off\index{time/space trade off}. Their performance comes at the expense of memory to store the results (and, in some schemes, memory to store the operands). Using the term processor, memo functions of one argument can be implemented as an attribute of the phylum of the argument term. Memo functions of more than one argument can be implemented as an attribute of a uniquely represented term that represents the function call. For example, for a function {\sf F} of two arguments one introduces a term {\sf F\_memo(x,y)}, of which the function result is an attribute. In both approaches it is essential that the arguments of the function are represented uniquely.

An example to illustrate memo functions is the Fibonacci function. This is a good example because the standard recursive definition recomputes the same result over and over again. For example, fib(5) uses fib(4) and fib(3), but for fib(4), fib(3) is also computed. It is also a silly example, because the best solution is a simple iteration. Furthermore, we use the abstract data type of natural numbers, and the cost of the rewrite functions outweighs the cost of Fibonacci itself. The non-memo solution looks as follows; the phylum and rewrite definitions are from the previously discussed natural numbers example.
\index{example!Fibonacci}\index{Fibonacci!example}
\index{example!memo function}
\index{example!uniq phylum}
\lgrindfile{fibonacci.tex}
The memo version looks as follows; the natural number phylum is made unique and has an attribute {\sf fib} to store the result.
\lgrindfile{fibonacci_memo.tex}
Note the initialisation of the attribute {\sf fib}. We take the nil pointer to mean `no value known yet'. In the second line of the function body the test for this is made, and in the second-to-last line the result is stored. Measurements show that computing fib(15) (which is 987) takes 1973 calls on {\sf fib}. In {\sf fibm} the with-statement is entered only 16 times. However, as stated, using rewriting to compute addition makes this hardly noticeable in the total run time. Both functions compute {\sf plus(377, 610)} exactly once, and this takes most of the time.

\subsection{Beyond Symbol Tables}
Attributes of uniquely stored terms can be used to implement symbol tables,\index{symbol tables} or more exactly, the contents of symbol tables. Looking up a symbol translates to newly creating a term (which is represented uniquely) and then inspecting its attributes. One can view this as making the look-up function a memo function. The nice thing is that entire terms can be used as the key in the `symbol table'. This is useful for, e.g., nested scopes. A key can then be a term composed of an identifier and a scope indication. This tuple should be unique.
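As a purely hypothetical sketch (the phylum and operator names below are made up for illustration and do not come from one of the distributed examples; {\sf UNDEF} stands for some user-defined `undefined' constant), such a scoped key could be declared in the {\em Kimwitu} input roughly as follows:
\begin{verbatim}
/* hypothetical sketch of a scoped symbol-table key */
ident {uniq}:   Ident(casestring)
;

scope {uniq}:   Globalscope()
        |       Nestedscope(ident scope)
;

key {uniq}:     Key(ident scope)
        {       short type = UNDEF; }
;
\end{verbatim}
\noindent
Looking up an identifier in a given scope then amounts to building the corresponding {\sf Key} term and inspecting (or setting) its {\sf type} attribute, in the same way as for a simple identifier.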
As an example, we consider the detection of common subexpressions. Every subexpression is a key in the symbol table, and one pass over the expression computes the attribute {\sf ocs}, which represents the number of occurrences of each subexpression. \index{example!symbol table}\index{symbol table!example} \index{example!uniq phylum}\index{uniq phylum!example} \lgrindfile{comsub.tex} The example also illustrates a technique through which all values of a particular phylum in the symbol table can be accessed. The idea is that they are strung together using an attribute (here {\sf next}) and a global variable (here {\sf alltrees}), which are manipulated in the initialisation part of a term. The other essential components of this technique are the initialisation of the global variable, and the inclusion of its definition in all the files through {\sf KC\_TYPES\_HEADER}. This technique also works for non unique phyla. Unique phyla should not be used when some of the attributes depend on the context of their nodes. \index{uniq or not unique usage} %% \subsection{Storage Options} %% \label{cook-uniq} %% Usually, we want to free intermediate results of a computation as soon as %% we have the final results. %% However, by default, `uniq'-ly created terms cannot be freed. %% The solution is a more general `storage-class' concept, that offers %% hooks for the manipulation of the hashtables that implement the `uniq' %% storage option. %% Because often not all `uniq' terms should be treated in the same way, %% alternate storage classes can be used. %% All phyla that should have the properties of `uniq' phyla should be %% declared as belonging to exactly {\em one} storage class. %% The default storage class is {\em uniq}, but other can created as needed, %% by declaring them in a %% %storageclass %% declaration. %% All phyla that should be treated in the same way, eg. be freed at the same time, %% should be declared as belonging to the same storage class. %% %% The storage classes can be managed as follows. %% Hashtables can be created as needed, and bound (`assigned') to %% one or more storage classes. %% The hashtables can be freed, or replaced by other hashtables (by re-assigning). %% %% A more subtle mechanism than freeing is available: a `hash-level' system %% makes it possible to free only those terms that were created since the %% last time the hash-level was increased. \section{Design Considerations for {\em Kimwitu}} \label{sec:design} In this section we discuss some of the considerations that have shaped the design of {\em Kimwitu}. This should provide the reader with some background information on the `philosophy' of the system. % for a conference paper, we'll start with a discussion of design goals % in the conclusion we will then see an evaluation of these goals and possible extensions \subsection{Why a Type per Phylum?} A fundamental choice is between a node type per phylum, as is done in {\em Kimwitu}, or a generic node type (the term structure to describe all term structures). This difference is comparable to that between compiled code and interpreted code. The first representation option is more efficient to use, it seems, but results in much more C-code (it might be different if assembly is generated directly). The second option is more suitable for language independent or polymorphic operations (as is used in the kernel of the Synthesizer Generator). 
An additional benefit of the first option is that it allows compile-time type checking of user-added code, as each phylum will correspond to a different type.

\subsection{What is in a Name?}
How are the various objects generated by the system named? The problem is that for each input name (e.g. of a phylum or operator) a number of identifiers in the output are generated (e.g. for an operator a name is generated to distinguish it from other operators, and a name for creating a term with that operator, etc.). The basic idea is that related concepts have related names.

In natural languages a comparable situation exists. For example, in English the words norm, normal, normalcy, normality, normalization, normalize, normalized, normalizes, normalizeth, normalizing, normally, and normalness denote different but related forms of one word. These other words are called inflections, and are constructed, in most western languages, by changing suffixes. In other languages, e.g. Swahili\index{Swahili}, prefix changing is also used. In Swahili, the word {\em witu} means tree; its plural, meaning forest or jungle, is {\em mwitu}. The prefix {\em ki-} indicates a likeness of being, so that the name of our system reads as tree-s-ish. (This may not sound like English to you. Well, Swahili speakers don't count {\em Kimwitu} as a legal word either...) The same scheme is employed in programming languages. For example, in Algol-68 and C, `proc()' denotes the result of calling a parameterless function and `proc' denotes the function itself. An example from the term processor: the function to rewrite a phylum {\sf foo} is called {\sf rewrite\_foo}.

\subsection{What is the Place in Software Engineering?}
In the engineering of language-based software, there are two extreme approaches. One is to just use a regular programming language (assembly language is not used very much any more these days); the other is to use a very high-level formalism, such as attribute grammars. {\em Kimwitu} was designed to fill a place in between these extremes, and to attempt to bridge the gap. The main theme is that by supporting {\em multi-paradigm programming}\index{paradigm} one can avoid being locked in by one formalism. In each formalism there are things that cannot be easily or efficiently expressed. Abstract data types can allow easy expression of certain functions, but the implementations are not necessarily efficient. In C every detail that can possibly influence performance is under the control of the programmer, but that does not necessarily allow easy expression of conceptually simple functions.
%the concept of free types and free representations.

\section{Acknowledgements}\index{acknowledgements}
Helpful comments on previous versions of this manual were made by Bart Botma, Henk Eertink, Robert Elbrink, Rob Gansevles, Eric van Hengstum, Gerrit van der Hoeven, Anneke Kleppe, Matthijs Kuiper, Albert Nijmeijer, and Jan Tretmans.

\appendix
\section{Syntax of the {\em Kimwitu} input}
In this section we present the syntax of the {\em Kimwitu} input. The notation is based on {\sf yacc}, with the following extensions. Single quotes are used for string denotations; ordinary parentheses () denote grouping; square brackets {[]} denote zero or one instance; curly brackets \{\} denote zero or more instances. Comments are in C style, e.g. enclosed between {\sf /*} and {\sf */}, but can be nested. Some of the rules and conditions are expressed in prose, rather than by formal means.
An identifier ({\sf ID}) is a sequence of letters, digits, and underscores, not starting with a digit. \\[2ex]
\lgrindfile{kimwitu_syntax.tex} %lgrindupdate 32 -> 24

\section{Structure File Encoding}
This is a description of the ASCII SSL V3 format as used by Grammatech's Synthesizer Generator and the term processor Kimwitu (University of Twente). (Actually, both of these are generators of programs using this representation.) Not all features are described; e.g. other atomic phyla are possible in Synthesizers, but not in combination with Kimwitu.
%% Not all described features are implemented in all tools.
%% E.g. Kimwitu generated programs will not write files with sharing (yet), although they can
%% read them.

The structure file format is an ASCII encoding of a term. It is a prefix representation of this term. A file representation has two major components. The first component is a table of the operators that are used in the term; operators not appearing in the term do not have to be contained in this table. The operators in this table are numbered, starting from 0. The second component of the file is a representation of the term (or object as it is called). The begin markers of these components, {\sf \$operators} and {\sf \$object}, must be followed by one space at the end of the line.

Each line in the operator table describes the properties of one operator, and has four fields. The fields in the line are separated by one space. They are: the operator name, the number of its operands, the number of attributes (in term processor generated files this is always 0), and an indication of whether the operator belongs to an atomic phylum (1=yes, 0=no).

In the object section each line represents one node in the term. There are four types of these nodes: operator applications, string constants, integer constants, and pointers to shared nodes. A line beginning with a number denotes an operator application; the number is an index in the operator table. The rest of this line, if any, can be ignored (it contains an alternative unparsing indication for some tools). An operator of an atomic phylum is followed, on the next line, by a representation of a value. A plus sign (+) denotes a string, and is followed by a number indicating the number of characters in the string, a separating space, and the string. In the string, all printable characters except the backslash ($\backslash$) are represented as such. A backslash is doubled, and a non-printable character is represented as a backslash followed by a hexadecimal representation of the ASCII number, e.g. newline is $\backslash${\tt 0a}. An integer is represented as the string encoding of its decimal representation.

Pointers serve to share trees and strings (but not integers). Such a pointer is encoded in a base-64 representation, in which the character {\tt :} represents the `digit' 0, and the character {\tt y} represents the `digit' 63. Intermediate `digits' are represented by the intermediate ASCII characters. For example, the string {\tt ;=} denotes the value 67. Conceptually at least, the result of each operator application is stored in one table, and each string is stored in a second table. The two numbers on the line below the begin marker {\sf \$object} give the sizes of the operator application table and the string table, respectively. The pointer value is then a reverse index (counting from the end back) in the appropriate table of values. For example, the last value of the appropriate kind in the file before the pointer has number 1.
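As an illustration of the pointer encoding only (the following fragment is a sketch written for this description; it is not taken from the generated reading routines), a back-reference can be decoded in C as follows, with the most significant `digit' first, which is consistent with the {\tt ;=} example above:
\begin{verbatim}
#include <stdio.h>

/* Sketch: decode a base-64 back-reference as described above.
 * ':' encodes the digit 0 and 'y' encodes the digit 63; the most
 * significant digit comes first.  Returns -1 for other characters. */
long decode_backref(const char *s)
{
    long value = 0;

    for (; *s != '\0' && *s != '\n'; s++) {
        int digit = *s - ':';          /* ':' is ASCII 58 */
        if (digit < 0 || digit > 63)
            return -1;                 /* not a back-reference line */
        value = value * 64 + digit;
    }
    return value;
}

int main(void)
{
    printf("%ld\n", decode_backref(";="));  /* prints 67, as above */
    return 0;
}
\end{verbatim}
\noindent
The resulting number is then used as the reverse index described above, counting back from the most recently read value of the appropriate kind.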
The following two examples illustrates the structure file format. Both examples show the same term; the first example contains no sharing, the second everything possible is shared. Also, in the second example the operator table in the operators section has been sorted (decreasingly) on the number of operator applications. As a result, the references to this table should be smaller (lower numbers), taking less characters for their representation. {\small \begin{tabular}{l} A\#S\#C\#S\#S\#L\#V\#3 {$\leftarrow$ \em magic word indicating file type}\\ \$operators {$\leftarrow$ \em begin marker operator table}\\ CR\_Spec 12 0 0 {$\leftarrow$ \em operator 0}\\ CR\_Label 1 0 0\\ \_Str 0 0 1 {$\leftarrow$ \em operator of an atomic phylum}\\ Nilcr\_comment\_list 0 0 0\\ CR\_Specification\_id 1 0 0\\ CR\_Identifier 2 0 0\\ NoCaseStr 0 0 1\\ CR\_DefExtension 1 0 0\\ Nilcr\_gate\_identifier\_list 0 0 0\\ Nilcr\_identifier\_declaration\_list 0 0 0\\ CR\_Noexit\_part 0 0 0\\ Nilcr\_data\_type\_definition\_list 0 0 0\\ CR\_Definition\_block 3 0 0\\ CR\_Stop\_expression 2 0 0\\ Nilcr\_annotation\_list 0 0 0\\ Nilcr\_process\_definition\_list 0 0 0\\ CR\_Booleans\_NotChecked 0 0 0\\ CR\_IS8807 0 0 0\\ \$object {$\leftarrow$ \em begin marker object} \\ 24 4 {$\leftarrow$ \em number of operator applications; number of strings}\\ 0 {$\leftarrow$ \em operator application, index in table above}\\ 1\\ 2\\ +10 specname\_1 {$\leftarrow$ \em string representation}\\ 3\\ 4\\ 5\\ 6\\ +8 specname\\ 7\\ 6\\ +1 0\\ 8\\ 9\\ 10\\ 11\\ 3\\ 12\\ 13\\ 1\\ 2\\ +6 stop\_0\\ 14\\ 11\\ 15\\ 16\\ 14\\ 17 \end{tabular} } {\small \begin{tabular}{l} A\#S\#C\#S\#S\#L\#V\#3 {$\leftarrow$ \em magic word indicating file type}\\ \$operators {$\leftarrow$ \em begin marker operator table}\\ CR\_Label 1 0 0\\ NoCaseStr 0 0 1 {$\leftarrow$ \em operator of an atomic phylum}\\ \_Str 0 0 1 {$\leftarrow$ \em operator of an atomic phylum}\\ Nilcr\_identifier\_declaration\_list 0 0 0\\ CR\_Spec 12 0 0\\ CR\_IS8807 0 0 0\\ Nilcr\_comment\_list 0 0 0\\ CR\_Booleans\_NotChecked 0 0 0\\ CR\_DefExtension 1 0 0\\ CR\_Specification\_id 1 0 0\\ CR\_Definition\_block 3 0 0\\ Nilcr\_process\_definition\_list 0 0 0\\ CR\_Identifier 2 0 0\\ Nilcr\_gate\_identifier\_list 0 0 0\\ Nilcr\_annotation\_list 0 0 0\\ CR\_Noexit\_part 0 0 0\\ Nilcr\_data\_type\_definition\_list 0 0 0\\ CR\_Stop\_expression 2 0 0\\ \$object {$\leftarrow$ \em begin marker object}\\ 21 4 {$\leftarrow$ \em number of operator applications; number of strings}\\ 4 {$\leftarrow$ \em operator application, index in table above}\\ 0\\ 2\\ +10 specname\_1 {$\leftarrow$ \em string representation}\\ 6 {$\leftarrow$ \em shared node referenced below as label\_1}\\ 9\\ 12\\ 1\\ +8 specname\\ 8\\ 1\\ +1 0\\ 13\\ 3\\ 15\\ 16 {$\leftarrow$ \em shared node referenced below as label\_2}\\ D {$\leftarrow$ \em reference to label\_1 above}\\ 10\\ 17\\ 0\\ 2\\ +6 stop\_0\\ 14 {$\leftarrow$ \em shared node referenced below as label\_3}\\ @ {$\leftarrow$ \em reference to label\_2 above}\\ 11\\ 7\\ = {$\leftarrow$ \em reference to label\_3 above}\\ 5 \end{tabular} } \section{Compatibility with previous versions of Kimwitu} During the (ongoing) development of Kimwitu new ideas lead to changes in the software. Most of those changes are invisible to the Kimwitu user, some of them are not. This section lists a number of visible changes in Kimwitu. 
\subsection{Define-before-use constraints}
In older versions of Kimwitu, both {\em phyla} and {\em operators} had to be defined before their use in a pattern, such as appears in function definitions and rewrite rules. More recent versions of Kimwitu (version {\sf V3\_0} or newer) no longer impose this constraint.

\subsection{Node-sharing in Structure Files}
From version {\sf V3\_4} on, the {\sf CSGIOwrite} routines use node-sharing features. Older kc-generated programs cannot read files that use node-sharing; of course, new kc-generated programs can read files without it. For compatibility with older programs, the use of node-sharing for {\em writing} files can be turned off. The routine {\sf kc\_set\_csgio\_sharing}\index{kc_set_csgio_sharing@{\sf kc\_set\_csgio\_sharing}} can be called with a boolean argument {\sf False} to make the {\sf CSGIOwrite} routines {\em not} use node-sharing. The routine returns the current csgio node-sharing setting. In addition, if the preprocessor symbol {\sf KC\_CSGIO\_NO\_SHARING}\index{KC_CSGIO_NO_SHARING@{\sf KC\_CSGIO\_NO\_SHARING}} is defined when the generated {\sf csgiok.c} file is compiled, the {\sf CSGIOwrite} routines do {\em not} use node-sharing. Defining this macro does {\em not} turn off the ability to {\em read} files that contain node-sharing.

\subsection{The UNIQMALLOC2 Macro}
This macro is named {\sf UNIQMALLOC2} for backward compatibility with previous versions of {\sf kc} that defined and used a macro {\sf UNIQMALLOC} that referred to a {\em unary} function. {\sf UNIQMALLOC2} refers to a {\em binary} function.

\subsection{The {\sf KIMW\_} Redirection Symbols and Macros}
In previous versions of Kimwitu the redirection symbols and macros had a {\sf KIMW\_} prefix. Because these names did not fit in the Kimwitu naming scheme, the redirection symbols and macros now have a {\sf KC\_} prefix. The `old' redirection keywords are still recognized by Kimwitu, but a warning is given.

\subsection{The {\sf view} Enumerated Type}
In previous versions of Kimwitu only unparse rules had {\em views}, which were collected in an enumerated type {\sf view}. For backwards compatibility, {\sf view} is now defined as a {\em typedef} for the enumerated type {\sf uview}.

\section{Future}
This section contains some of the ideas for future changes and enhancements of Kimwitu. The purpose of this section is twofold: it can be seen as a sort of `advance warning' for potential future incompatible changes, and, because it contains material that is not completely mature, it can be seen as a `request for feedback' from {\em You}, the Kimwitu users.

\subsection{CSGIO Structure File IO Routines}
Possible future change: changing the names of the structure file functions (because the uppercase {\sf CSGIO} prefix doesn't really fit in the naming scheme) and supporting more formats.

\subsection{Conditional Rewrite Rules}
In future versions, it will be possible to use conditional rewrite rules. The exact Kimwitu input syntax has not yet been decided.

\subsection{User Defined Atomic Phyla}
In future versions of Kimwitu, it will be possible to specify your own atomic phyla.
%The syntax for an atomic phylum specification will likely be close
%to the following:
%{\em to be provided.}
The exact Kimwitu input syntax has not yet been decided.

\subsection{Generation of C++ code}
Future versions of Kimwitu will be able to generate C++ code, and accept a more C++-like syntax for function definitions.
The move towards C++ will be gradual, with the following likely milestones: generation of C code that can be compiled as C++ code, generation of overloaded C++ functions for the standard functions, and generation of CSGIO routines that support the C++ file I/O mechanisms.

\subsection{Support for Sets, Queues, Stacks, Arrays, etc.}
Once Kimwitu has the power of C++, it may be possible to support more `basic' data types.

\subsection{Polymorphism}
Extend the Kimwitu input syntax with a mechanism to define some kind of template functions over lists.

\subsection{Hash Management}
In addition to the routines described in~\ref{gen-func}, the routines {\sf kc\_ht\_static}, {\sf kc\_ht\_dynamic}, {\sf kc\_ht\_inc\_level}, {\sf kc\_ht\_dec\_level}, and {\sf kc\_ht\_free\_level} have been added as an experiment. Right now it is unclear whether they are useful or not, whether this sort of feature should be offered in Kimwitu, or whether it is better left to the individual programmer. A compromise might be to supply such routines as a kind of library in the form of a Kimwitu input (.k) file.

Controlling the lifetime of unique memory cells by the creation, freeing and (re-)assignment of hashtables has one disadvantage: the uniqueness of storage property is no longer guaranteed. The following alternative does not have this flaw, but may be slower in execution time. The idea is to treat the hashtable as a stack of lifetimes, called segments, or memory levels. New memory cells will always be created in the memory segment at the top of the stack. A new segment can be pushed onto the stack, e.g. to store intermediate results of a computation. When the computation is finished, the segment can be popped from the stack; its memory cells remain alive. When the final result of the computation is then copied, its memory cells in the popped segment are re-created in the now topmost segment. Finally, the popped segment can be deleted. For each hashtable such a stack of segments can be used, and pushing and popping the segments of one hashtable does not influence the segments of another one. In addition to the segments on the stack, there is one `heap' segment, which is meant for memory cells that will never be reclaimed. In Kimwitu, we call the heap segment `static', and the stack segments `dynamic'. Kimwitu offers two routines to switch between the use of the `static' segment and the `dynamic' segments. By default, all unique memory cells are created in the `heap' segments of the hashtables, i.e. by default the `static' segments are used. The routines that `manage' the segments (pushing, popping, freeing, switching between `static' (heap) and `dynamic' (stack)), and the routines that manage the hashtables (creation, assignment, freeing, deletion, reuse) can be freely intermixed, to use the best of both approaches. A description of the memory-level routines follows.

\fbox{\sf void kc\_ht\_static( kc\_storageclass\_t sc ); }
\index{kc_ht_static@{\sf kc\_ht\_static}}
Allocate everything using the `static' scheme, until {\sf kc\_ht\_dynamic} is called for this storage class. This is the default.

\fbox{\sf void kc\_ht\_dynamic( kc\_storageclass\_t sc );}
\index{kc_ht_dynamic@{\sf kc\_ht\_dynamic}}
Allocate everything using the `dynamic' scheme, until {\sf kc\_ht\_static} is called for this storage class, or until the hashtable is changed (by assignment, deletion, clearing or reuse).
\fbox{\sf void kc\_ht\_inc\_level( kc\_storageclass\_t sc );} \index{kc_ht_inc_level@{\sf kc\_ht\_inc\_level}} Increment the memory level: everything allocated in hashtable {\sf ht} using the `dynamic' scheme will be freed during subsequent {\sf kc\_ht\_dec\_level}, {\sf kc\_ht\_free\_level} calls. \fbox{\sf void kc\_ht\_dec\_level( kc\_storageclass\_t sc ); } \index{kc_ht_dec_level@{\sf kc\_ht\_dec\_level}} Decrements the memory level: everything allocated using the `dynamic' scheme in the previous, higher, memory level is still available, but can no longer be found using the hashtable, ie. copying a node that was created in the previous, higher, memory level will yield a new copy of that node (and the initialization code of its phylum will be executed). \fbox{\sf void kc\_ht\_free\_level( kc\_storageclass\_t sc ); } \index{kc_ht_free_level@{\sf kc\_ht\_free\_level}} Free all nodes/elements allocated during previous, higher, memory levels. \newpage \addcontentsline{toc}{section}{References} \pdfbookmark[1]{References}{refs} \bibliography{tp.man} \bibliographystyle{alpha} \addcontentsline{toc}{section}{Index} \pdfbookmark[1]{Index}{index} \printindex \end{document} kimwitu-doc-10a+1/license.tex0000444000114400011300000000336207076562232016053 0ustar piefelsimulantKimwitu, a system that supports the construction of programs that use trees or terms as their main data structure. \begin{center} Copyright \copyright 1989-1998 Axel Belinfante, University of Twente, Enschede, The Netherlands.\\ All Rights Reserved \end{center} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but \textsc{without any warranty}; without even the implied warranty of \textsc{merchantability} or \textsc{fitness for a particular purpose}. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Output of Kimwitu (and of works based on Kimwitu) can be used without restriction (even though this output may contain fragments from the source code of Kimwitu (or of works based on Kimwitu), with one notable exception (because Kimwitu is built using itself): The output generated by Kimwitu (and by works based on Kimwitu) from the source code of Kimwitu (and from the source code of works based on Kimwitu) is covered by the GNU General Public License. The structure-file-io reading and writing code has been derived, in part, from The Synthesizer Generator$^{\textsc{\small{tm}}}$, a product of GrammaTech, Inc, copyright \copyright 1991, GrammaTech, Inc. Used with permission. GrammaTech and Synthesizer Generator, are tradenames of GrammaTech, Inc., One Hopkins Place, Ithaca, NY 14850, (607) 273-7340. kimwitu-doc-10a+1/tp.man.bib0000444000114400011300000000354007076562233015561 0ustar piefelsimulant @BOOK{ehrig:act1, AUTHOR = {H. Ehrig and B. Mahr}, ADDRESS = {Berlin}, PUBLISHER = {Springer-Verlag}, TITLE = {Fundamentals of Algebraic Specification 1}, YEAR = {1985} } @BOOK{csg:refman, AUTHOR = {T. Teitelbaum and T. W. Reps}, ADDRESS = {New York}, PUBLISHER = {Springer-Verlag}, TITLE = {The Synthesizer Generator Reference Manual: Third Edition}, YEAR = {1989} } @BOOK{csg:book, AUTHOR = {T. 
Teitelbaum and T. W. Reps}, ADDRESS = {New York}, PUBLISHER = {Springer-Verlag}, TITLE = {The Synthesizer Generator - A System for Constructing Language-Based Editors}, YEAR = {1989} } @ARTICLE{knuth-ag, AUTHOR = {Donald E. Knuth}, JOURNAL = {Mathematical Systems Theory}, NOTE = {A correction appears in vol. 5 pp95-96}, PAGES = {127-145}, TITLE = {{Semantics of Context-Free Languages}}, VOLUME = {2}, YEAR = {1968} } @ARTICLE{csg:sigplan, AUTHOR = {T. Teitelbaum and T. W. Reps}, JOURNAL = {SIGPLAN}, NUMBER = {5}, PAGES = {42-48}, TITLE = {The Synthesizer Generator}, VOLUME = {19}, YEAR = {May 1984} } @MISC{graphviz, AUTHOR = {John Ellson and Eleftherios Koutsofios and Stephen North}, NOTE = {\\ URL: http://www.research.att.com/sw/tools/graphviz/}, TITLE = {graphviz -- tools for viewing and interacting with graph diagrams.} } kimwitu-doc-10a+1/phylum_example.tex0000444000114400011300000000047707076562233017467 0ustar piefelsimulant% Remember to use the lgrind style \File{phylum\_example.k},{12:12},{Jan 17 1997} \L{\LB{\V{expr}:}\Tab{8}{\V{Plus}(\V{expr}_\V{expr})}} \L{\LB{\|}\Tab{8}{\V{Minus}(\V{expr}_\V{expr})}} \L{\LB{\|}\Tab{8}{\V{Neg}(\V{expr})}} \L{\LB{\|}\Tab{8}{\V{Zero}()}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{exprlist}:_\K{list}_\V{expr};}} kimwitu-doc-10a+1/list_phylum_example.tex0000444000114400011300000000032107076562233020506 0ustar piefelsimulant% Remember to use the lgrind style \File{list\_phylum\_example.k},{19:44},{Oct 21 1992} \L{\LB{\V{exprlist}:}\Tab{8}{\V{Nilexprlist}()}} \L{\LB{\|}\Tab{8}{\V{Consexprlist}(\V{expr}_\V{exprlist})}} \L{\LB{;}} kimwitu-doc-10a+1/attributed_phylum_example.tex0000444000114400011300000000050607076562234021710 0ustar piefelsimulant% Remember to use the lgrind style \File{attributed\_phylum\_example.k},{12:13},{Jan 17 1997} \L{\LB{\V{expr}:}\Tab{8}{\V{Plus}(\V{expr}_\V{expr})}} \L{\LB{\|}\Tab{8}{\V{Minus}(\V{expr}_\V{expr})}} \L{\LB{\|}\Tab{8}{\V{Neg}(\V{expr})}} \L{\LB{\|}\Tab{8}{\V{Zero}()}} \L{\LB{\{}\Tab{8}{\K{float}_\V{value}_=_0;\}}} \L{\LB{;}} kimwitu-doc-10a+1/alternative_attribute_initialisation.tex0000444000114400011300000000030607076562234024127 0ustar piefelsimulant% Remember to use the lgrind style \File{alternative\_attribute\_initialisation.k},{19:44},{Oct 21 1992} \L{\LB{\{}\Tab{8}{\K{float}_\V{value};}} \L{\LB{}\Tab{8}{\{_\V{\$0}\-\!\>\V{value}=0;\}\}}} kimwitu-doc-10a+1/uniq_phylum_example.tex0000444000114400011300000000031607076562235020515 0ustar piefelsimulant% Remember to use the lgrind style \File{uniq\_phylum\_example.k},{19:44},{Oct 21 1992} \L{\LB{\V{ID}_\{\K{uniq}\}:}\Tab{8}{\V{Str}(\K{casestring})}} \L{\LB{\{}\Tab{8}{\K{short}_\V{type}_=_\V{UNDEF};\};}} kimwitu-doc-10a+1/symbol_table_example.tex0000444000114400011300000000112307076562235020614 0ustar piefelsimulant% Remember to use the lgrind style \File{symbol\_table\_example.k},{19:44},{Oct 21 1992} \L{\LB{\C{}\1\* defining occurrence \*\1\CE{}}} \L{\LB{\V{id}_=_\V{Str}(\V{mkcasestring}(\S{}\"foo\"\SE{}));}} \L{\LB{\K{if}_(\V{id}\-\!\>\V{type}_!=_\V{UNDEF})_\V{error}(\S{}\"doubly_defined\"\SE{});}} \L{\LB{\V{id}\-\!\>\V{type}_=_\V{USED};_\C{}\1\* set other attributes here as well \*\1\CE{}}} \L{\LB{}} \L{\LB{\C{}\1\* applied occurrence \*\1\CE{}}} \L{\LB{\V{id}_=_\V{Str}(\V{mkcasestring}(\S{}\"foo\"\SE{}));}} \L{\LB{\K{if}_(\V{id}\-\!\>\V{type}_==_\V{UNDEF})_\V{error}(\S{}\"undefined\"\SE{});}} kimwitu-doc-10a+1/non_uniq_declaration_example.tex0000444000114400011300000000020407076562235022332 0ustar piefelsimulant% Remember to use the lgrind style 
\File{non\_uniq\_declaration\_example.k},{15:00},{Sep 28 1992} \L{\LB{\V{ID}_\{!_\K{uniq}\}:;}} kimwitu-doc-10a+1/storage_classes_example.tex0000444000114400011300000000061307076562236021325 0ustar piefelsimulant% Remember to use the lgrind style \File{storage\_classes\_example.k},{12:21},{Oct 26 1992} \L{\LB{\%\V{storageclass}_\V{intermediate}_\V{wholeprogram};__\C{}\1\* the {`}\%\' is part of the keyword \*\1\CE{}}} \L{\LB{}} \L{\LB{\V{ID2}_\{_\V{intermediate}_\}:_\V{Operator1}(_.\,.\,._)_\|_\V{Operator2}(_.\,.\,._)_\|_.\,.\,._;}} \L{\LB{\V{ID3}_\{_\V{wholeprogram}_\}:_\V{Operator3}(_.\,.\,._);}} kimwitu-doc-10a+1/redefine_LARGEPRIME_example.tex0000444000114400011300000000027207076562236021435 0ustar piefelsimulant% Remember to use the lgrind style \File{redefine\_LARGEPRIME\_example.k},{15:00},{Sep 28 1992} \L{\LB{\%\{_\V{KC\_TYPES\_HEADER}}} \L{\LB{\K{\#define}_\V{LARGEPRIME}_0}} \L{\LB{\%\}}} kimwitu-doc-10a+1/function_definition_example_newest.tex0000440000114400011300000000057507076562236023571 0ustar piefelsimulant% Remember to use the lgrind style \File{function\_definition\_example\_newest.k},{10:52},{Jan 23 1997} \L{\LB{\K{int}_\V{len}(\V{exprlist}_\V{el})_\{}} \L{\LB{}\Tab{4}{\K{with}(\V{el})_\{}} \L{\LB{}\Tab{8}{\V{Nilexprlist}:}\Tab{36}{\{_\K{return}_0;_\}}} \L{\LB{}\Tab{8}{\V{tt}_=_\V{Consexprlist}(\*,_\V{t}):}\Tab{36}{\{_\K{return}_\V{len}(\V{t})_+_1;_\}}} \L{\LB{\}}\Tab{4}{\}}} kimwitu-doc-10a+1/foreach_statement_example.tex0000444000114400011300000000051707076562236021642 0ustar piefelsimulant% Remember to use the lgrind style \File{foreach\_statement\_example.k},{14:33},{Feb 19 1997} \L{\LB{\K{int}_\V{len}(\V{exprlist}_\V{el})_\{}} \L{\LB{____\K{int}_\V{length}_=_0;_}} \L{\LB{____\K{foreach}(_\V{l};_\V{exprlist}_\V{el}_)_\{}} \L{\LB{________\V{length}++;}} \L{\LB{____\}}} \L{\LB{____\K{return}_\V{length};}} \L{\LB{\}}} kimwitu-doc-10a+1/foreach_with_statement_example.tex0000444000114400011300000000103407076562237022671 0ustar piefelsimulant% Remember to use the lgrind style \File{foreach\_with\_statement\_example.k},{14:36},{Feb 19 1997} \L{\LB{\V{expr}_\V{sum}(\V{exprlist}_\V{el})_\{}} \L{\LB{____\V{expr}_\V{sub\_total}_=_\V{Zero}();}} \L{\LB{____\K{foreach}(_\V{\$e};_\V{exprlist}_\V{el}_)_\{}} \L{\LB{________\V{Add}(_\V{x}_):}\Tab{24}{\{_\V{sub\_total}_=_\V{Plus}(_\V{sub\_total},_\V{x}_);_\}}} \L{\LB{________\V{Subtract}(_\V{x}_):}\Tab{24}{\{_\V{sub\_total}_=_\V{Minus}(_\V{sub\_total},_\V{x}_);_\}}} \L{\LB{____\}}} \L{\LB{____\K{return}_\V{sub\_total};}} \L{\LB{\}}} kimwitu-doc-10a+1/foreach_pattern_statement_example.tex0000444000114400011300000000063507076562237023401 0ustar piefelsimulant% Remember to use the lgrind style \File{foreach\_pattern\_statement\_example.k},{14:36},{Feb 19 1997} \L{\LB{\V{expr}_\V{add\_Adds}(\V{exprlist}_\V{el})_\{}} \L{\LB{____\V{expr}_\V{all\_Adds}_=_\V{Zero}();}} \L{\LB{____\K{foreach}(_\V{Add}(_\V{x}_);_\V{exprlist}_\V{el}_)_\{}} \L{\LB{________\V{all\_Adds}_=_\V{Plus}(_\V{all\_Adds},_\V{x}_);}} \L{\LB{____\}}} \L{\LB{____\K{return}_\V{all\_Adds};}} \L{\LB{\}}} kimwitu-doc-10a+1/rewrite_rule_example.tex0000444000114400011300000000022707076562237020656 0ustar piefelsimulant% Remember to use the lgrind style \File{rewrite\_rule\_example.k},{15:00},{Sep 28 1992} \L{\LB{\V{Neg}(\V{x})_\-\!\>_\V{Minus}(\V{Zero}(),_\V{x});}} kimwitu-doc-10a+1/rewrite_rule_function_example.tex0000444000114400011300000000023507076562240022554 0ustar piefelsimulant% Remember to use the lgrind style \File{rewrite\_rule\_function\_example.k},{15:00},{Sep 28 1992} 
\L{\LB{\V{Operator}(\V{x})_\-\!\>_\V{Function}(\V{x});}} kimwitu-doc-10a+1/unparse_definition_example.tex0000444000114400011300000000120107076562240022016 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_definition\_example.k},{12:14},{Oct 27 1992} \L{\LB{\V{Plus}(\V{e1},_\V{e2})}\Tab{26}{\-\!\>_[_:_\V{e1}_\S{}\"+\"\SE{}_\V{e2}_];}} \L{\LB{\V{Minus}(\V{e1},_\V{e2})}\Tab{26}{\-\!\>_[_:_\V{e1}_\S{}\"\-\"\SE{}_\V{e2}_];}} \L{\LB{\V{Neg}(\V{e1})}\Tab{26}{\-\!\>_[_:_\S{}\"\-\"\SE{}_\V{e1}_];}} \L{\LB{\V{Zero}()}\Tab{26}{\-\!\>_[_:_\S{}\"0\"\SE{}_];}} \L{\LB{}} \L{\LB{\V{Nilexprlist}()}\Tab{26}{\-\!\>_[_:_];}} \L{\LB{\V{Consexprlist}(\V{ex},_\V{Nilexprlist}())}\Tab{26}{\-\!\>_[_:_\V{ex}_];}} \L{\LB{\V{Consexprlist}(\V{ex},_\V{rest})}\Tab{26}{\-\!\>_[_:_\V{ex}_\S{}\",_\"\SE{}_\V{rest}_];}} kimwitu-doc-10a+1/unparse_escaped_brackets_example.tex0000444000114400011300000000072307076562241023161 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_escaped\_brackets\_example.k},{14:07},{Oct 27 1992} \L{\LB{\V{Divideby}(\V{e1},_\V{e2})}\Tab{24}{\-\!\>_[_:_\{_\K{if}_(\V{eq\_expr}(\V{e2},_\V{Zero}())_\}_\V{\$}\{}} \L{\LB{}\Tab{40}{\V{e1}_\S{}\"\1_\1\*_\<\!\-\-_division_by_zero_\-\-\!\>_\*\1\"\SE{}_\V{e2}}} \L{\LB{}\Tab{32}{_\V{\$}\}_\{_\K{else}_\}_\V{\$}\{}} \L{\LB{}\Tab{40}{\V{e1}_\S{}\"\1\"\SE{}_\V{e2}}} \L{\LB{}\Tab{32}{_\V{\$}\}}} \L{\LB{}\Tab{24}{___];}} kimwitu-doc-10a+1/unparse_views_example.tex0000444000114400011300000000030307076562241021026 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_views\_example.k},{19:57},{Oct 21 1992} \L{\LB{\V{Plus}(\V{e1}_\V{e2})}\Tab{16}{\-\!\>_[_\V{view1}_\V{view2}:_\V{e1}_\S{}\"+\"\SE{}_\V{e2}_];}} kimwitu-doc-10a+1/unparse_view_declaration_example.tex0000444000114400011300000000030707076562241023214 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_view\_declaration\_example.k},{14:04},{Feb 19 1997} \L{\LB{\%\V{uview}_\V{view1}_\V{view2};_\C{}\1\* the {`}\%\' is part of the keyword \*\1\CE{}}} kimwitu-doc-10a+1/printer_function_example.tex0000444000114400011300000000057007076562242021533 0ustar piefelsimulant% Remember to use the lgrind style \File{printer\_function\_example.k},{14:03},{Feb 19 1997} \L{\LB{\K{void}_\V{printer}(\K{char}_\*\V{s},_\V{uview}_\V{v})_\{}} \L{\LB{____\V{printf}(\S{}\"\%s\"\SE{},_\V{s});}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\{_\C{}\1\* example of usage \*\1\CE{}}} \L{\LB{____\V{unparse\_exprlist}(\V{expression},_\V{printer},_\V{base\_uview});}} \L{\LB{\}}} kimwitu-doc-10a+1/including_in_generated_files_example.tex0000444000114400011300000000037107076562242024004 0ustar piefelsimulant% Remember to use the lgrind style \File{including\_in\_generated\_files\_example.k},{14:54},{Jul 22 1996} \L{\LB{\%\{_\C{}\1\* these brackets should be at the beginning of a line \*\1\CE{}}} \L{\LB{\K{\#include}_\<\V{math}.\V{h}\>}} \L{\LB{\%\}}} kimwitu-doc-10a+1/include_redirection_example.tex0000444000114400011300000000035207076562242022153 0ustar piefelsimulant% Remember to use the lgrind style \File{include\_redirection\_example.k},{14:54},{Jul 22 1996} \L{\LB{\%\{_\V{KC\_TYPES\_HEADER}}} \L{\LB{____\C{}\1\* Include this in k.h, and thus in every generated file. 
\*\1\CE{}}} \L{\LB{\%\}}} kimwitu-doc-10a+1/generated_data_type_example.tex0000444000114400011300000000302407076562243022131 0ustar piefelsimulant% Remember to use the lgrind style \File{generated\_data\_type\_example.k},{14:54},{Jul 22 1996} \L{\LB{\K{typedef}_\K{enum}_\{_.\,.\,.,_\V{sel\_Neg}_=_4,_\V{sel\_Minus}_=_5,_\V{sel\_Plus}_=_6,_\V{sel\_Zero}_=_7,}} \L{\LB{_________________\V{sel\_Nilexprlist}_=_8,_\V{sel\_Consexprlist}_=_9,_.\,.\,._\}_\V{kc\_enum\_operators};}} \L{\LB{}} \L{\LB{\K{typedef}_\K{struct}_\V{kc\_tag\_expr}_\*\V{expr};_\C{}\1\* note that a {`}expr\' is a pointer to a {`}struct kc\_tag\_expr\'\*\1\CE{}}} \L{\LB{\K{typedef}_\K{struct}_\V{kc\_tag\_exprlist}_\*\V{exprlist};}} \L{\LB{}} \L{\LB{\K{struct}_\V{kc\_tag\_expr}_\{}} \L{\LB{}\Tab{8}{\V{kc\_enum\_operators}_\V{prod\_sel};}} \L{\LB{}\Tab{8}{\K{union}_\{}} \L{\LB{}\Tab{16}{\K{struct}_\{}} \L{\LB{}\Tab{24}{\V{expr}_\V{expr\_1};}} \L{\LB{}\Tab{16}{\}_\V{Neg};}} \L{\LB{}\Tab{16}{\K{struct}_\{}} \L{\LB{}\Tab{24}{\V{expr}_\V{expr\_1};}} \L{\LB{}\Tab{24}{\V{expr}_\V{expr\_2};}} \L{\LB{}\Tab{16}{\}_\V{Minus};}} \L{\LB{}\Tab{16}{\K{struct}_\{}} \L{\LB{}\Tab{24}{\V{expr}_\V{expr\_1};}} \L{\LB{}\Tab{24}{\V{expr}_\V{expr\_2};}} \L{\LB{}\Tab{16}{\}_\V{Plus};}} \L{\LB{}\Tab{8}{\}_\V{u};}} \L{\LB{}\Tab{8}{\K{float}_\V{value};__\C{}\1\* an attribute \*\1\CE{}}} \L{\LB{\};}} \L{\LB{}} \L{\LB{\K{struct}_\V{kc\_tag\_exprlist}_\{}} \L{\LB{}\Tab{8}{\V{kc\_enum\_operators}_\V{prod\_sel};}} \L{\LB{}\Tab{8}{\K{union}_\{}} \L{\LB{}\Tab{16}{\K{struct}_\{}} \L{\LB{}\Tab{24}{\V{expr}_\V{expr\_1};}} \L{\LB{}\Tab{24}{\V{exprlist}_\V{exprlist\_1};}} \L{\LB{}\Tab{16}{\}_\V{Consexprlist};}} \L{\LB{}\Tab{8}{\}_\V{u};}} \L{\LB{\};}} kimwitu-doc-10a+1/include_redirection_symbol_example.tex0000444000114400011300000000030307076562243023535 0ustar piefelsimulant% Remember to use the lgrind style \File{include\_redirection\_symbol\_example.k},{14:54},{Jul 22 1996} \L{\LB{\%\{_\V{KC\_REWRITE}}} \L{\LB{\K{\#include}_\S{}\"my\_fns.h\"\SE{}}} \L{\LB{\%\}}} kimwitu-doc-10a+1/yacc_abstract_syntax_example.tex0000444000114400011300000000041607076562244022354 0ustar piefelsimulant% Remember to use the lgrind style \File{yacc\_abstract\_syntax\_example.k},{14:54},{Jul 22 1996} \L{\LB{\C{}\1\* Abstract syntax \*\1\CE{}}} \L{\LB{\V{funnytree}:}\Tab{16}{\V{Str}(\K{casestring})}} \L{\LB{\|}\Tab{16}{\V{Cons}(\V{funnytree}_\V{funnytree})}} \L{\LB{;}} kimwitu-doc-10a+1/structure_file_abstract_syntax_example.tex0000444000114400011300000000043107076562244024471 0ustar piefelsimulant% Remember to use the lgrind style \File{structure\_file\_abstract\_syntax\_example.k},{14:54},{Jul 22 1996} \L{\LB{\C{}\1\* Abstract syntax \*\1\CE{}}} \L{\LB{\V{funnytree}:}\Tab{16}{\V{Str}(\K{casestring})}} \L{\LB{\|}\Tab{16}{\V{Cons}(\V{funnytree}_\V{funnytree})}} \L{\LB{;}} kimwitu-doc-10a+1/funny.tex0000444000114400011300000000076707076562245015602 0ustar piefelsimulant% Remember to use the lgrind style \File{funny.k},{15:10},{Jul 19 1996} \L{\LB{\C{}\1\* A very simple tree structure \*\1\CE{}}} \L{\LB{\V{funnytree}:_}\Tab{16}{\V{Str}(\K{casestring})}} \L{\LB{\|_}\Tab{16}{\V{Cons}(\V{funnytree}_\V{funnytree})}} \L{\LB{;}} \L{\LB{}} \L{\LB{\K{int}_\V{nroftips}(\V{funnytree}_\V{\$f})}} \L{\LB{\{}} \L{\LB{____\V{Str}:}\Tab{16}{\{_\K{return}_1;_\}}} \L{\LB{____\V{Cons}(\V{l},_\V{r}):}\Tab{16}{\{_\K{return}_\V{nroftips}(\V{l})_+_\V{nroftips}(\V{r});_\}}} \L{\LB{\}}} kimwitu-doc-10a+1/unparse_control_sequences.tex0000444000114400011300000000023107076562245021715 0ustar piefelsimulant% Remember to 
use the lgrind style \File{unparse\_control\_sequences.k},{16:19},{Sep 28 1992} \L{\LB{\S{}\"start\%t\%nlevel1\%nlevel1\%b\%nend\"\SE{}}} kimwitu-doc-10a+1/unparse_output.tex0000444000114400011300000000026107076562245017525 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_output.k},{16:19},{Sep 28 1992} \L{\LB{\V{start}}} \L{\LB{}\Tab{8}{\V{level1}}} \L{\LB{}\Tab{8}{\V{level1}}} \L{\LB{\V{end}}} kimwitu-doc-10a+1/knuth1.tex0000444000114400011300000000211507076562247015644 0ustar piefelsimulant% Remember to use the lgrind style \File{knuth1.k},{17:30},{Oct 5 1992} \L{\LB{\C{}\1\* From D. Knuth, Semantics of Context Free Languages \*\1\CE{}}} \L{\LB{\C{}\1\* The abstract syntax tree of fractional binary numbers, attributed \*\1\CE{}}} \L{\LB{\V{number}:}\Tab{10}{\V{Nonfraction}(\V{bitstring})}} \L{\LB{\|}\Tab{10}{\V{Fraction}(\V{bitstring}_\V{bitstring})}} \L{\LB{\{}\Tab{10}{\K{float}_\V{value};}\Tab{24}{\C{}\1\* synthesized \*\1\CE{}\}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{bitstring}:}\Tab{10}{\V{Oneb}(\V{bit})}} \L{\LB{\|}\Tab{10}{\V{Moreb}(\V{bitstring}_\V{bit})}} \L{\LB{\{}\Tab{10}{\K{float}_\V{value};}\Tab{24}{\C{}\1\* synthesized \*\1\CE{}}} \L{\LB{}\Tab{10}{\K{int}_\V{length};}\Tab{24}{\C{}\1\* synthesized \*\1\CE{}}} \L{\LB{}\Tab{10}{\K{int}_\V{scale};}\Tab{24}{\C{}\1\* inherited \*\1\CE{}}} \L{\LB{\}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{bit}:}\Tab{10}{\V{One}()}} \L{\LB{\|}\Tab{10}{\V{Zero}()}} \L{\LB{\{}\Tab{10}{\K{float}_\V{value};}\Tab{24}{\C{}\1\* synthesized \*\1\CE{}}} \L{\LB{}\Tab{10}{\K{int}_\V{scale};}\Tab{24}{\C{}\1\* inherited \*\1\CE{}}} \L{\LB{\}}} \L{\LB{;}} kimwitu-doc-10a+1/printer_function_with_indentation.tex0000444000114400011300000000201407076562246023446 0ustar piefelsimulant% Remember to use the lgrind style \File{printer\_function\_with\_indentation.k},{14:04},{Feb 19 1997} \L{\LB{\K{\#include}_\<\V{stdio}.\V{h}\>}} \L{\LB{\K{\#include}_\S{}\"unpk.h\"\SE{}}} \L{\LB{\K{static}_\V{indent}=0;}} \L{\LB{}} \L{\LB{\K{void}_\V{printer}(\K{char}_\*\V{s},_\V{uview}_\V{v})_\{}} \L{\LB{____\K{char}_\V{c};_\K{int}_\V{j};}} \L{\LB{}} \L{\LB{____\K{while}(\V{c}=\*\V{s}++)_\{}} \L{\LB{}\Tab{8}{\K{if}_(\V{c}!=\S{}\'\%\'\SE{})_\V{putchar}(\V{c});}} \L{\LB{}\Tab{8}{\K{else}_\K{switch}(\V{c}=\*\V{s}++)_\{}} \L{\LB{}\Tab{8}{\K{case}_\S{}\'b\'\SE{}:}\Tab{24}{\V{indent}\-\-;_\K{break};}} \L{\LB{}\Tab{8}{\K{case}_\S{}\'t\'\SE{}:}\Tab{24}{\V{indent}++;_\K{break};}} \L{\LB{}\Tab{8}{\K{case}_\S{}\'n\'\SE{}:}\Tab{24}{\V{putchar}(\S{}\'\2n\'\SE{});}} \L{\LB{}\Tab{24}{\K{for}_(\V{j}=\V{indent};_\V{j}\>0;_\V{j}\-\-)_\V{putchar}(\S{}\'\2t\'\SE{});}} \L{\LB{}\Tab{24}{\K{break};}} \L{\LB{}\Tab{8}{\K{case}_\S{}\'\20\'\SE{}:}\Tab{24}{\K{return};}} \L{\LB{}\Tab{8}{\K{default}:}\Tab{24}{\V{putchar}(\V{c});}} \L{\LB{\}\}\}}} kimwitu-doc-10a+1/knuth2.tex0000444000114400011300000000273007076562247015650 0ustar piefelsimulant% Remember to use the lgrind style \File{knuth2.k},{15:14},{Jul 19 1996} \L{\LB{\C{}\1\* illustrating attribute evaluation without storing the attributes \*\1\CE{}}} \L{\LB{\K{float}_\V{eval\_number\_value}(\V{number}_\V{\$n})_\{}} \L{\LB{____\V{Nonfraction}(\V{b}):}\Tab{24}{\{_\K{return}_\V{eval\_bitstring\_value}(\V{b},0);_\}}} \L{\LB{____\V{Fraction}(\V{b1},_\V{b2}):}\Tab{24}{\{_\K{return}_\V{eval\_bitstring\_value}(\V{b1},0)_+}} \L{\LB{}\Tab{32}{_\V{eval\_bitstring\_value}(\V{b2},_\-\V{eval\_bitstring\_length}(\V{b2}));\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\K{float}_\V{eval\_bitstring\_value}(\V{bitstring}_\V{\$bs},_\K{int}_\V{scale})_\{}} 
\L{\LB{____\V{Oneb}(\V{b}):}\Tab{24}{\{_\K{return}_\V{eval\_bit\_value}(\V{b},_\V{scale});_\}}} \L{\LB{____\V{Moreb}(\V{bs\_bs},_\V{bs\_b}):}\Tab{24}{\{_\K{return}_\V{eval\_bitstring\_value}(\V{bs\_bs},\V{scale}+1)+_\V{eval\_bit\_value}(\V{bs\_b},_\V{scale});_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\K{int}_\V{eval\_bitstring\_length}(\V{bitstring}_\V{\$bs})_\{}} \L{\LB{____\V{Oneb}:}\Tab{24}{\{_\K{return}_1;_\}}} \L{\LB{____\V{Moreb}(\V{bs\_bs},_\*):}\Tab{24}{\{_\K{return}_\V{eval\_bitstring\_length}(\V{bs\_bs})+1;_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\<\V{math}.\V{h}\>}} \L{\LB{\%\}}} \L{\LB{}} \L{\LB{\K{float}_\V{eval\_bit\_value}(\V{bit}_\V{\$b},_\K{int}_\V{scale})_\{}} \L{\LB{____\V{One}:}\Tab{24}{\{_\K{return}_\V{exp2}((\K{float})\V{scale});_\}}} \L{\LB{____\V{Zero}:}\Tab{24}{\{_\K{return}_0.0;_\}}} \L{\LB{\}}} kimwitu-doc-10a+1/knuth3.tex0000444000114400011300000000440107076562250015640 0ustar piefelsimulant% Remember to use the lgrind style \File{knuth3.k},{15:15},{Jul 19 1996} \L{\LB{\C{}\1\* illustrating a multi-pass evaluation \*\1\CE{}}} \L{\LB{\K{void}_\V{pass1\_number}(\V{number}_\V{\$n})_\{}} \L{\LB{____\V{Nonfraction}(\V{b}):}\Tab{24}{\{_\V{pass1\_bitstring}(\V{b});_\}}} \L{\LB{____\V{Fraction}(\V{b1},_\V{b2}):}\Tab{24}{\{_\V{pass1\_bitstring}(\V{b1});}} \L{\LB{}\Tab{24}{__\V{pass1\_bitstring}(\V{b2});_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\K{void}_\V{pass1\_bitstring}(\V{bitstring}_\V{\$b})_\{}} \L{\LB{____\V{Oneb}:}\Tab{24}{\{_\V{b}\-\!\>\V{length}=1;\}}} \L{\LB{____\V{Moreb}(\V{bs},_\*):}\Tab{24}{\{_\V{pass1\_bitstring}(\V{bs});}} \L{\LB{}\Tab{24}{__\V{b}\-\!\>\V{length}=\V{bs}\-\!\>\V{length}+1;_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\C{}\1\* pass1\_bit omitted, it does nothing \*\1\CE{}}} \L{\LB{}} \L{\LB{\K{void}_\V{pass2\_number}(\V{number}_\V{\$n})_\{}} \L{\LB{____\V{Nonfraction}(\V{b}):}\Tab{24}{\{_\V{b}\-\!\>\V{scale}=0;}} \L{\LB{}\Tab{24}{__\V{pass2\_bitstring}(\V{b});}} \L{\LB{}\Tab{24}{__\V{n}\-\!\>\V{value}=_\V{b}\-\!\>\V{value};_\}}} \L{\LB{____\V{Fraction}(\V{b1},_\V{b2}):}\Tab{24}{\{_\V{b1}\-\!\>\V{scale}=0;}} \L{\LB{}\Tab{24}{__\V{b2}\-\!\>\V{scale}=_\-\V{b2}\-\!\>\V{length};}} \L{\LB{}\Tab{24}{__\V{pass2\_bitstring}(\V{b1});}} \L{\LB{}\Tab{24}{__\V{pass2\_bitstring}(\V{b2});}} \L{\LB{}\Tab{24}{__\V{n}\-\!\>\V{value}=_\V{b1}\-\!\>\V{value}+\V{b2}\-\!\>\V{value};_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\K{void}_\V{pass2\_bitstring}(\V{bitstring}_\V{\$bs})_\{}} \L{\LB{____\V{Oneb}(\V{b}):}\Tab{24}{\{_\V{b}\-\!\>\V{scale}=_\V{bs}\-\!\>\V{scale};}} \L{\LB{}\Tab{24}{__\V{pass2\_bit}(\V{b});}} \L{\LB{}\Tab{24}{__\V{bs}\-\!\>\V{value}=_\V{b}\-\!\>\V{value};_\}}} \L{\LB{____\V{Moreb}(\V{b1},_\V{b2}):}\Tab{24}{\{_\V{b2}\-\!\>\V{scale}=_\V{bs}\-\!\>\V{scale};}} \L{\LB{}\Tab{24}{__\V{b1}\-\!\>\V{scale}=_\V{bs}\-\!\>\V{scale}+1;}} \L{\LB{}\Tab{24}{__\V{pass2\_bitstring}(\V{b1});}} \L{\LB{}\Tab{24}{__\V{pass2\_bit}(\V{b2});}} \L{\LB{}\Tab{24}{__\V{bs}\-\!\>\V{value}=_\V{b1}\-\!\>\V{value}_+_\V{b2}\-\!\>\V{value};_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\K{void}_\V{pass2\_bit}(\V{bit}_\V{\$b})_\{}} \L{\LB{____\V{One}:}\Tab{24}{\{_\V{b}\-\!\>\V{value}=_\V{exp2}((\K{float})\V{b}\-\!\>\V{scale});_\}}} \L{\LB{____\V{Zero}:}\Tab{24}{\{_\V{b}\-\!\>\V{value}=_0.0;_\}}} \L{\LB{\}}} kimwitu-doc-10a+1/knuth4.tex0000444000114400011300000000200007076562250015632 0ustar piefelsimulant% Remember to use the lgrind style \File{knuth4.k},{16:16},{Jul 19 1996} \L{\LB{\C{}\1\* the main program to call the evaluations \*\1\CE{}}} \L{\LB{\K{void}_\V{main}()_\{}} \L{\LB{____\V{number}_\V{n};}} 
\L{\LB{____\V{n}_=_\V{Fraction}(\V{Moreb}(\V{Moreb}(\V{Moreb}(\V{Oneb}(\V{One}()),_\V{One}()),_\V{Zero}()),_\V{One}()),}} \L{\LB{}\Tab{16}{_\V{Moreb}(\V{Oneb}(\V{Zero}()),_\V{One}()));_\C{}\1\* 1101.01 \*\1\CE{}}} \L{\LB{____\V{printf}(\S{}\"_\%f_\2n\"\SE{},_\V{eval\_number\_value}(\V{n}));}} \L{\LB{____\V{pass1\_number}(\V{n});_\V{pass2\_number}(\V{n});_}} \L{\LB{____\V{printf}(\S{}\"_\%f_\2n\"\SE{},_\V{n}\-\!\>\V{value});}} \L{\LB{}} \L{\LB{____\V{n}_=_\V{Nonfraction}(\V{Moreb}(\V{Moreb}(\V{Moreb}(\V{Oneb}(\V{One}()),_\V{One}()),_\V{Zero}()),_\V{One}()));}} \L{\LB{____\V{printf}(\S{}\"_\%f_\2n\"\SE{},_\V{eval\_number\_value}(\V{n}));_\C{}\1\* 1101 \*\1\CE{}}} \L{\LB{____\V{pass1\_number}(\V{n});_\V{pass2\_number}(\V{n});_}} \L{\LB{____\V{printf}(\S{}\"_\%f_\2n\"\SE{},_\V{n}\-\!\>\V{value});}} \L{\LB{\}}} kimwitu-doc-10a+1/nats_newer.tex0000440000114400011300000000202607076562251016567 0ustar piefelsimulant% Remember to use the lgrind style \File{nats\_newer.k},{11:36},{Jan 23 1997} \L{\LB{\C{}\1\* the abstract data type of natural numbers \*\1\CE{}}} \L{\LB{\V{nat}:}\Tab{8}{\V{zero}()}} \L{\LB{\|}\Tab{8}{\V{s}(\V{nat})}} \L{\LB{\|}\Tab{8}{\V{plus}(\V{nat}_\V{nat})}} \L{\LB{\|}\Tab{8}{\V{mul}(\V{nat}_\V{nat})}} \L{\LB{\|}\Tab{8}{\V{ack}(\V{nat}_\V{nat})}} \L{\LB{;}} \L{\LB{}} \L{\LB{\C{}\1\* rewrite rules for addition, multiplication, and Ackermann\'s function \*\1\CE{}}} \L{\LB{\V{plus}(\V{x},_\V{zero}())}\Tab{13}{\-\!\>_\V{x};}\Tab{36}{\V{ack}(\V{zero}(),_\V{x})}\Tab{52}{\-\!\>_\V{s}(\V{x});}} \L{\LB{\V{plus}(\V{x},_\V{s}(\V{y}))}\Tab{13}{\-\!\>_\V{s}(\V{plus}(\V{x},_\V{y}));}\Tab{36}{\V{ack}(\V{s}(\V{x}),_\V{zero}())}\Tab{52}{\-\!\>_\V{ack}(\V{x},_\V{s}(\V{zero}()));}} \L{\LB{\V{mul}(\V{x},_\V{zero}())}\Tab{13}{\-\!\>_\V{zero}();}\Tab{36}{\V{ack}(\V{s}(\V{x}),_\V{s}(\V{y}))}\Tab{52}{\-\!\>_\V{ack}(\V{x},_\V{ack}(\V{s}(\V{x}),\V{y}));}} \L{\LB{\V{mul}(\V{x},_\V{s}(\V{y}))}\Tab{13}{\-\!\>_\V{plus}(\V{mul}(\V{x},_\V{y}),_\V{x});}} kimwitu-doc-10a+1/ski.tex0000444000114400011300000000144407076562252015220 0ustar piefelsimulant% Remember to use the lgrind style \File{ski.k},{17:02},{Sep 28 1992} \L{\LB{\C{}\1\* SKI combinator reduction \*\1\CE{}}} \L{\LB{\%\{_\V{KC\_REWRITE}}} \L{\LB{\K{int}_\V{cplus}();}} \L{\LB{\%\}}} \L{\LB{}} \L{\LB{\V{exp}:}\Tab{8}{\V{S}()}} \L{\LB{\|}\Tab{8}{\V{K}()}} \L{\LB{\|}\Tab{8}{\V{I}()}} \L{\LB{\|}\Tab{8}{\V{ap}(\V{exp}_\V{exp})}} \L{\LB{\|}\Tab{8}{\V{num}(\K{int})}} \L{\LB{\|}\Tab{8}{\V{plus}()}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{ap}(\V{I}(),_\V{x})}\Tab{32}{\-\!\>_\V{x};}} \L{\LB{\V{ap}(\V{ap}(\V{K}(),_\V{x}),_\V{y})}\Tab{32}{\-\!\>_\V{x};}} \L{\LB{\V{ap}(\V{ap}(\V{ap}(\V{S}(),_\V{x}),_\V{y}),_\V{z})}\Tab{32}{\-\!\>_\V{ap}(\V{ap}(\V{x},_\V{z}),_\V{ap}(\V{y},_\V{z}));}} \L{\LB{\V{ap}(\V{ap}(\V{plus}(),_\V{num}(\V{x})),_\V{num}(\V{y}))}\Tab{32}{\-\!\>_\V{num}(\V{cplus}(\V{x},_\V{y}));}} kimwitu-doc-10a+1/fibonacci.tex0000444000114400011300000000101607076562253016343 0ustar piefelsimulant% Remember to use the lgrind style \File{fibonacci.k},{15:29},{Jul 19 1996} \L{\LB{\C{}\1\* Fibonacci \*\1\CE{}}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\S{}\"rk.h\"\SE{}}} \L{\LB{\%\}}} \L{\LB{}} \L{\LB{\V{nat}_\V{fib}(\V{nat}_\V{\$n})_\{}} \L{\LB{____\V{zero}():}\Tab{16}{\{_\K{return}_\V{s}(\V{zero}());_\}}} \L{\LB{____\V{s}(\V{zero}()):}\Tab{16}{\{_\K{return}_\V{s}(\V{zero}());_\}}} \L{\LB{____\V{s}(\V{s}(\V{x})):}\Tab{16}{\{_\K{return}_\V{rewrite\_nat}(_\V{plus}(\V{fib}(\V{x}),_\V{fib}(\V{s}(\V{x}))));_\}}} \L{\LB{\}}} 
kimwitu-doc-10a+1/fibonacci_memo.tex0000444000114400011300000000210107076562253017354 0ustar piefelsimulant% Remember to use the lgrind style \File{fibonacci\_memo.k},{15:30},{Jul 19 1996} \L{\LB{\C{}\1\* Fibonacci with memo function \*\1\CE{}}} \L{\LB{\V{nat}\{\K{uniq}\}:}\Tab{16}{\V{zero}()}} \L{\LB{\|}\Tab{16}{\V{s}(\V{nat})}} \L{\LB{\|}\Tab{16}{\V{plus}(\V{nat}_\V{nat})}} \L{\LB{\{}\Tab{16}{\V{nat}_\V{fib}_=_(\V{nat})0;_\}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\C{}\1\* rewrite rules omitted \*\1\CE{}}} \L{\LB{}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\S{}\"rk.h\"\SE{}}} \L{\LB{\%\}}} \L{\LB{}} \L{\LB{\V{nat}_\V{fibm}(\V{nat}_\V{n})_\{}} \L{\LB{____\V{nat}_\V{result};}} \L{\LB{____\K{if}_(\V{n}\-\!\>\V{fib}_!=_(\V{nat})0)_\K{return}_\V{n}\-\!\>\V{fib};}} \L{\LB{____\K{with}(\V{n})\{}} \L{\LB{}\Tab{8}{\V{zero}():}\Tab{24}{\{_\V{result}_=_\V{s}(\V{zero}());_\}}} \L{\LB{}\Tab{8}{\V{s}(\V{zero}()):}\Tab{24}{\{_\V{result}_=_\V{s}(\V{zero}());_\}}} \L{\LB{}\Tab{8}{\V{s}(\V{s}(\V{x})):}\Tab{24}{\{_\V{result}_=_\V{rewrite\_nat}(_\V{plus}(\V{fibm}(\V{x}),_\V{fibm}(\V{s}(\V{x}))));_\}}} \L{\LB{____\}}} \L{\LB{____\V{n}\-\!\>\V{fib}_=_\V{result};}} \L{\LB{____\K{return}_\V{result};}} \L{\LB{\}}} kimwitu-doc-10a+1/comsub.tex0000444000114400011300000000306107076562254015721 0ustar piefelsimulant% Remember to use the lgrind style \File{comsub.k},{15:30},{Jul 19 1996} \L{\LB{\C{}\1\* A very simple tree structure \*\1\CE{}}} \L{\LB{\V{funnytree}_\{\K{uniq}\}:}\Tab{24}{\V{Str}(\K{casestring})}} \L{\LB{\|_}\Tab{24}{\V{Cons}(\V{funnytree}_\V{funnytree})}} \L{\LB{\{}\Tab{24}{\K{int}_\V{ocs}_=_0;}} \L{\LB{}\Tab{24}{\V{funnytree}_\V{next}_;}} \L{\LB{}\Tab{24}{\{_\V{\$0}\-\!\>\V{next}_=_\V{alltrees};}} \L{\LB{}\Tab{24}{__\V{alltrees}_=_\V{\$0};_\}}} \L{\LB{\}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\K{void}_\V{occurs}(\V{funnytree}_\V{\$f})_\{}} \L{\LB{____\V{Str}:}\Tab{24}{\{_\V{f}\-\!\>\V{ocs}++;_\}}} \L{\LB{____\V{Cons}(\V{f1},_\V{f2}):}\Tab{24}{\{_\V{f}\-\!\>\V{ocs}++;_\V{occurs}(\V{f1});_\V{occurs}(\V{f2});_\}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\%\{_\V{KC\_TYPES\_HEADER}}} \L{\LB{\V{funnytree}_\V{alltrees};}} \L{\LB{\%\}}} \L{\LB{}} \L{\LB{\K{void}_\V{main}()_\{}} \L{\LB{____\V{funnytree}_\V{ft},_\V{it};}} \L{\LB{}} \L{\LB{____\V{alltrees}_=_(\V{funnytree})0;}} \L{\LB{____\V{ft}_=_\V{Str}(\V{mkcasestring}(\S{}\"foo\"\SE{}));}} \L{\LB{____\V{ft}_=_\V{Cons}(\V{ft},_\V{ft});}} \L{\LB{____\V{ft}_=_\V{Cons}(\V{ft},_\V{Str}(\V{mkcasestring}(\S{}\"bar\"\SE{})));}} \L{\LB{____\V{ft}_=_\V{Cons}(\V{ft},_\V{ft});}} \L{\LB{____\V{it}_=_\V{alltrees};}} \L{\LB{____\V{occurs}(\V{it});}} \L{\LB{____\K{for}(;_\V{it}!=_(\V{funnytree})0;_\V{it}=_\V{it}\-\!\>\V{next})_\{}} \L{\LB{}\Tab{8}{\K{if}_(\V{it}\-\!\>\V{ocs}\>1)_\{}} \L{\LB{}\Tab{8}{____\V{printf}(\S{}\"occurs_\%d_times:\2n\"\SE{},_\V{it}\-\!\>\V{ocs});}} \L{\LB{}\Tab{8}{____\V{print\_funnytree}(\V{it});}} \L{\LB{\}___\}___\}}} kimwitu-doc-10a+1/printer_function_with_view_indentation.tex0000444000114400011300000000161007076562254024500 0ustar piefelsimulant% Remember to use the lgrind style \File{printer\_function\_with\_view\_indentation.k},{14:49},{Feb 19 1997} \L{\LB{\K{\#include}_\<\V{stdio}.\V{h}\>}} \L{\LB{\K{\#include}_\S{}\"unpk.h\"\SE{}}} \L{\LB{}} \L{\LB{\K{void}_\V{printer}(\K{char}_\*\V{s},_\V{uview}_\V{v})_\{}} \L{\LB{____\K{char}_\V{c};_\K{int}_\V{j};}} \L{\LB{____\K{static}_\V{indent}=0;}\Tab{24}{\C{}\1\* static here, or static at file level \*\1\CE{}}} \L{\LB{}} \L{\LB{____\K{switch}(\V{v})_\{}} \L{\LB{____\K{case}_\V{v\_left}:}\Tab{24}{\V{indent}\-\-;_\K{break};}} 
\L{\LB{____\K{case}_\V{v\_right}:}\Tab{24}{\V{indent}++;_\K{break};}} \L{\LB{____\K{case}_\V{v\_nl}:}\Tab{24}{\V{putchar}(\S{}\'\2n\'\SE{});}} \L{\LB{}\Tab{24}{\K{for}_(\V{j}=\V{indent};_\V{j}\>0;_\V{j}\-\-)_\V{putchar}(\S{}\'\2t\'\SE{});}} \L{\LB{}\Tab{24}{\K{break};}} \L{\LB{____\K{default}:}\Tab{24}{\V{printf}(\S{}\"\%s\"\SE{},_\V{s});_\K{break};}} \L{\LB{\}\}}} kimwitu-doc-10a+1/printer_function_with_control_view_indentation.tex0000444000114400011300000000200007076562255026233 0ustar piefelsimulant% Remember to use the lgrind style \File{printer\_function\_with\_control\_view\_indentation.k},{14:04},{Feb 19 1997} \L{\LB{\K{\#include}_\<\V{stdio}.\V{h}\>}} \L{\LB{\K{\#include}_\S{}\"unpk.h\"\SE{}}} \L{\LB{}} \L{\LB{\K{void}_\V{printer}(\K{char}_\*\V{s},_\V{uview}_\V{v})_\{}} \L{\LB{____\K{char}_\V{c};_\K{int}_\V{j};}} \L{\LB{____\K{static}_\V{indent}=0;}\Tab{24}{\C{}\1\* static here, or static at file level \*\1\CE{}}} \L{\LB{}} \L{\LB{____\K{switch}(\V{v})_\{}} \L{\LB{____\K{case}_\V{v\_left}:}\Tab{24}{\V{indent}\-\-;_\K{break};}} \L{\LB{____\K{case}_\V{v\_right}:}\Tab{24}{\V{indent}++;_\K{break};}} \L{\LB{____\K{default}:}\Tab{24}{\K{while}(\V{c}=\*\V{s}++)_\{}} \L{\LB{}\Tab{24}{____\K{switch}(\V{c})\{}} \L{\LB{}\Tab{24}{____\K{case}_\S{}\'\2n\'\SE{}:}\Tab{40}{\V{putchar}(\V{c});}} \L{\LB{}\Tab{40}{\K{for}_(\V{j}=\V{indent};_\V{j}\>0;_\V{j}\-\-)_\V{putchar}(\S{}\'\2t\'\SE{});}} \L{\LB{}\Tab{40}{\K{break};}} \L{\LB{}\Tab{24}{____\K{default}:}\Tab{40}{\V{putchar}(\V{c});_\K{break};}} \L{\LB{\}\}\}\}}} kimwitu-doc-10a+1/unparse_view_sequences.tex0000444000114400011300000000045607076562256021222 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_view\_sequences.k},{13:12},{Oct 26 1992} \L{\LB{\S{}\"start\"\SE{}_\S{}\"\"\SE{}:\V{v\_right}_\S{}\"\"\SE{}:\V{v\_nl}_\S{}\"level1\"\SE{}_\S{}\"\"\SE{}:\V{v\_nl}_\S{}\"level1\"\SE{}_\S{}\"\"\SE{}:\V{v\_left}_\S{}\"\"\SE{}:\V{v\_nl}_\S{}\"end\"\SE{}}} kimwitu-doc-10a+1/unparse_control_view_sequences.tex0000444000114400011300000000035207076562256022755 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_control\_view\_sequences.k},{13:12},{Oct 26 1992} \L{\LB{\S{}\"start\"\SE{}_\S{}\"\"\SE{}:\V{v\_right}_\S{}\"\2nlevel1\2nlevel1\"\SE{}_\S{}\"\"\SE{}:\V{v\_left}_\S{}\"\2nend\"\SE{}}} kimwitu-doc-10a+1/rewrite_rule_general_example.tex0000440000114400011300000000025107076562256022345 0ustar piefelsimulant% Remember to use the lgrind style \File{rewrite\_rule\_general\_example.k},{12:19},{Feb 19 1997} \L{\LB{\V{Neg}(\V{x})_\-\!\>_\<:__\V{Minus}(\V{Zero}(),_\V{x})__\>;}} kimwitu-doc-10a+1/rewrite_views_example.tex0000440000114400011300000000026507076562257021044 0ustar piefelsimulant% Remember to use the lgrind style \File{rewrite\_views\_example.k},{12:21},{Feb 19 1997} \L{\LB{\V{Neg}(\V{x})_\-\!\>_\<_\V{view1}_\V{view2}:__\V{Minus}(\V{Zero}(),_\V{x})__\>;}} kimwitu-doc-10a+1/skil.tex0000444000114400011300000000120707076562263015373 0ustar piefelsimulant% Remember to use the lgrind style \File{skil.l},{15:11},{Oct 27 1992} \L{\LB{\C{}\1\* lex input for numbers \*\1\CE{}}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}_}} \L{\LB{\K{\#include}_\S{}\"y.tab.h\"\SE{}}} \L{\LB{\K{\#include}_\<\V{stdio}.\V{h}\>}} \L{\LB{\K{\#include}_\<\V{ctype}.\V{h}\>}} \L{\LB{\%\}}} \L{\LB{\%\%}} \L{\LB{[0\-9]+}\Tab{16}{\{_\V{sscanf}(\V{yytext},_\S{}\"\%d\"\SE{},_\&\V{yylval}.\V{yt\_int});_\K{return}_\V{NUM};\}}} \L{\LB{[\2\V{t}\2\V{n}_]}\Tab{16}{\{_;_\}__\C{}\1\* skip the white space \*\1\CE{}}} 
\L{\LB{.}\Tab{16}{\{_\K{return}_(\V{isupper}(\V{yytext}[0])?\V{tolower}(\V{yytext}[0]):\V{yytext}[0]);_\}}} kimwitu-doc-10a+1/unparse_rule_general.tex0000440000114400011300000000033307076562257020630 0ustar piefelsimulant% Remember to use the lgrind style \File{unparse\_rule\_general.k},{12:33},{Feb 19 1997} \L{\LB{\V{pattern1},_\V{pattern2},_.\,.\,._\-\!\>_[_\V{v1}_\V{v2}_.\,.\,._:_.\,.\,._],_.\,.\,.,_[_\V{vn}_.\,.\,._:_.\,.\,._]_;}} kimwitu-doc-10a+1/rewrite_rule_general.tex0000440000114400011300000000033707076562257020640 0ustar piefelsimulant% Remember to use the lgrind style \File{rewrite\_rule\_general.k},{12:33},{Feb 19 1997} \L{\LB{\V{pattern1},_\V{pattern2},_.\,.\,._\-\!\>_\<_\V{v1}_\V{v2}_.\,.\,._:_.\,.\,._\>,_.\,.\,.,_\<_\V{vn}_.\,.\,._:_.\,.\,._\>_;}} kimwitu-doc-10a+1/function_definition_example_dollar.tex0000440000114400011300000000051607076562260023531 0ustar piefelsimulant% Remember to use the lgrind style \File{function\_definition\_example\_dollar.k},{16:13},{Feb 19 1997} \L{\LB{\K{int}_\V{len}(\V{exprlist}_\V{\$el})_\{}} \L{\LB{}\Tab{4}{\V{Nilexprlist}:}\Tab{32}{\{_\K{return}_0;_\}}} \L{\LB{}\Tab{4}{\V{tt}_=_\V{Consexprlist}(\*,_\V{t}):}\Tab{32}{\{_\K{return}_\V{len}(\V{t})_+_1;_\}}} \L{\LB{\}}} kimwitu-doc-10a+1/function_definition_equiv.tex0000440000114400011300000000131607076562260021671 0ustar piefelsimulant% Remember to use the lgrind style \File{function\_definition\_equiv.k},{14:53},{Feb 10 1998} \L{\LB{\V{boolean}_\V{equiv}(\V{expr}_\V{\$a},_\V{expr}_\V{\$b})_\{}} \L{\LB{____\V{Add}(_\V{asub}_)_\&_\V{Add}(_\V{bsub}_),_\V{Subtract}(_\V{asub}_)_\&_\V{Subtract}(_\V{bsub}_),_\V{Const}(_\*_)_\&_\V{Const}(_\*_):}} \L{\LB{_____________\{_\K{return}_\V{equiv}(_\V{asub},_\V{bsub}_);_\}}} \L{\LB{____\V{Plus}(_\V{asub1},_\V{asub2}_)_\&_\V{Plus}(_\V{bsub1},_\V{bsub2}_),_\V{Minus}(_\V{asub1},_\V{asub2}_)_\&_\V{Minus}(_\V{bsub1},_\V{bsub2}_):}} \L{\LB{_____________\{_\K{return}_\V{equiv}(_\V{asub1},_\V{bsub1}_)_\&\&_\V{equiv}(_\V{asub2},_\V{bsub2}_);_\}}} \L{\LB{____\K{default}:_\{_\K{return}_\V{False};_\}}} \L{\LB{\}}} kimwitu-doc-10a+1/foreach_equiv.tex0000440000114400011300000000134207076562260017242 0ustar piefelsimulant% Remember to use the lgrind style \File{foreach\_equiv.k},{14:47},{Feb 10 1998} \L{\LB{\V{boolean}_\V{equiv\_lists}(\V{exprlist}_\V{el1},_\V{exprlist}_\V{el2})_\{}} \L{\LB{____\V{boolean}_\V{result}_=_\V{True};}} \L{\LB{____\K{foreach}(_\V{e1}_\&_\V{e2};_\V{exprlist}_\V{el1},_\V{explist}_\V{el2}_)_\{}} \L{\LB{________\C{}\1\* this body is executed as long as both lists have elements \*\1\CE{}}} \L{\LB{________\V{result}_=_\V{result}_\&\&_\V{equiv}(_\V{e1},_\V{e2}_);}} \L{\LB{____\}}} \L{\LB{____\C{}\1\* we don\'t know if one list is longer than the other; we only \*\1\CE{}}} \L{\LB{____\C{}\1\* know the \'result\' for the elements that we compared with \'equiv\' \*\1\CE{}}} \L{\LB{____\K{return}_\V{result};}} \L{\LB{\}}} kimwitu-doc-10a+1/foreach_equiv_afterforeach.tex0000440000114400011300000000163107076562261021755 0ustar piefelsimulant% Remember to use the lgrind style \File{foreach\_equiv\_afterforeach.k},{13:41},{Feb 10 1998} \L{\LB{\V{boolean}_\V{equiv\_lists}(\V{exprlist}_\V{el1},_\V{exprlist}_\V{el2})_\{}} \L{\LB{____\V{boolean}_\V{result}_=_\V{True};}} \L{\LB{____\K{foreach}(_\V{e1}_\&_\V{e2};_\V{exprlist}_\V{el1},_\V{explist}_\V{el2}_)_\{}} \L{\LB{________\C{}\1\* this body is executed as long as both lists have elements \*\1\CE{}}} \L{\LB{________\V{result}_=_\V{result}_\&\&_\V{equiv}(_\V{e1},_\V{e2}_);}} 
\L{\LB{____\}_\V{afterforeach}(_\V{\$re1}_\&_\V{\$re2}_\C{}\1\* same number of items here as in foreach \*\1\CE{}_)_\{}} \L{\LB{________\V{Nilexprlist}()_\&_\V{Nilexprlist}()_:_\{_\C{}\1\* both lists same length: result unchanged \*\1\CE{}_\}}} \L{\LB{________\K{default}:_\{_\C{}\1\* lists have different length: result changed \*\1\CE{}_\V{result}_=_\V{False};_\}}} \L{\LB{____\}}} \L{\LB{____\K{return}_\V{result};}} \L{\LB{\}}} kimwitu-doc-10a+1/pattern_parameterized.tex0000440000114400011300000000063407076562261021017 0ustar piefelsimulant% Remember to use the lgrind style \File{pattern\_parameterized.k},{14:51},{Feb 10 1998} \L{\LB{\V{boolean}_\V{has\_sub}(\V{expr}_\V{\$a},_\V{expr}_\V{\$b})_\{}} \L{\LB{____\V{Add}(_\V{asub}_)_\&_\V{asub}_=_\*_:_\{}} \L{\LB{________\C{}\1\* code here will be executed if \'a\' has top-operator \'Add\', and asub == b \*\1\CE{}}} \L{\LB{____\}}} \L{\LB{____\K{default}:_\{_\K{return}_\V{False};_\}}} \L{\LB{\}}} kimwitu-doc-10a+1/pattern_equivalence.tex0000440000114400011300000000056607076562262020471 0ustar piefelsimulant% Remember to use the lgrind style \File{pattern\_equivalence.k},{15:01},{Feb 10 1998} \L{\LB{\V{Add}(_\V{asub}_)_\&___\V{Add}_(_\V{bsub}_)_,_\V{Add}(_\V{asub}_)_\&_\V{Subtract}_(_\V{bsub}_)___:_\{_\C{}\1\* C-code \*\1\CE{}_\}}} \L{\LB{}} \L{\LB{\V{Add}(_\V{asub}_)_\&_(_\V{Add}_(_\V{bsub}_)_,_______________\V{Subtract}_(_\V{bsub}_)_)__:_\{_\C{}\1\* C-code \*\1\CE{}_\}}} kimwitu-doc-10a+1/foreach_equivalence.tex0000440000114400011300000000324107076562262020414 0ustar piefelsimulant% Remember to use the lgrind style \File{foreach\_equivalence.k},{14:56},{Feb 10 1998} \L{\LB{\C{}\1\* here we combine all kinds of items: patterns, dollar-prefixed variables}} \L{\LB{_\* and ordinary variables. Of course, the items can appear in arbitrary order.}} \L{\LB{_\*\1\CE{}}} \L{\LB{\K{foreach}(_\V{pattern1}_\&_.\,.\,._\&_\V{patternk}_\&_\V{\$dvar1}_\&_.\,.\,._\&_\V{\$dvarn}_\&_\V{var1}_\&_.\,.\,._\&_\V{varm}_;_.\,.\,._)_\{}} \L{\LB{}\Tab{8}{\C{}\1\* body, in which we can refer to}} \L{\LB{}\Tab{8}{_\* pattern variables, dollar-prefixed variables (dvar\*)}} \L{\LB{}\Tab{8}{_\* and ordinary variables (var\*)}} \L{\LB{}\Tab{8}{_\* but (most often) not to dollar-variables (\$i, i \>= 0)}} \L{\LB{}\Tab{8}{_\*\1\CE{}}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\C{}\1\* here we have the same statement, in which we only use ordinary variables,}} \L{\LB{_\* (and we introduced \'anonymous\' variables for the patterns pat\* ),}} \L{\LB{_\* together with nested with statements.}} \L{\LB{_\*\1\CE{}}} \L{\LB{\K{foreach}(_\V{var\_pat1}_\&_.\,.\,._\&_\V{var\_patk}_\&_\V{dvar1}_\&_.\,.\,._\&_\V{dvarn}_\&_\V{var1}_\&_.\,.\,._\&_\V{varm}_;_.\,.\,._)_\{}} \L{\LB{}\Tab{8}{\K{with}(_\V{var\_pat1},_.\,.\,.,_\V{var\_patk}_)_\{}} \L{\LB{}\Tab{16}{\V{pattern1}_\&_.\,.\,._\&_\V{patternk}_:_\{}} \L{\LB{}\Tab{24}{\K{with}(_\V{dvar1},_.\,.\,.,_\V{dvarn}_)_\{}} \L{\LB{}\Tab{32}{\C{}\1\* body, in which we can refer to}} \L{\LB{}\Tab{32}{_\* pattern variables, dollar-prefixed variables (dvar\*)}} \L{\LB{}\Tab{32}{_\* and ordinary variables (var\*)}} \L{\LB{}\Tab{32}{_\* but (most often) not to dollar-variables (\$i, i \>= 0)}} \L{\LB{}\Tab{32}{_\*\1\CE{}}} \L{\LB{\}}\Tab{8}{\}}\Tab{16}{\}}\Tab{24}{\}}} kimwitu-doc-10a+1/rewrite_view_declaration_example.tex0000440000114400011300000000030707076562262023217 0ustar piefelsimulant% Remember to use the lgrind style \File{rewrite\_view\_declaration\_example.k},{14:55},{Feb 19 1997} \L{\LB{\%\V{rview}_\V{view1}_\V{view2};_\C{}\1\* the {`}\%\' is part of the 
keyword \*\1\CE{}}} kimwitu-doc-10a+1/lex_input_example.tex0000444000114400011300000000101007076562263020143 0ustar piefelsimulant% Remember to use the lgrind style \File{lex\_input\_example.l},{15:00},{Sep 28 1992} \L{\LB{\C{}\1\* Lexemes \*\1\CE{}}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}_}} \L{\LB{\K{\#include}_\S{}\"y.tab.h\"\SE{}_}} \L{\LB{\%\}}} \L{\LB{\%\%}} \L{\LB{[\V{a}\-\V{zA}\-\V{Z0}\-9]+}\Tab{16}{\{_\V{yylval}.\V{yt\_casestring}_=_\V{mkcasestring}(\V{yytext});_\K{return}_\V{ID};\}}} \L{\LB{[\2\V{t}\2\V{n}_]}\Tab{16}{\{_;_\}__\C{}\1\* skip the white space \*\1\CE{}}} \L{\LB{.}\Tab{16}{\{_\K{return}_\V{yytext}[0];_\}}} kimwitu-doc-10a+1/csgoutmain.tex0000444000114400011300000000133007076562264016600 0ustar piefelsimulant% Remember to use the lgrind style \File{csgoutmain.c},{14:47},{Oct 27 1992} \L{\LB{\C{}\1\* an example of structure file i\1o \*\1\CE{}}} \L{\LB{\K{\#include}_\<\V{stdio}.\V{h}\>}} \L{\LB{\K{\#include}_\S{}\"csgiok.h\"\SE{}}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}}} \L{\LB{}} \L{\LB{\K{void}_\V{main}()_\{}} \L{\LB{____\K{char}_\*\V{io};}} \L{\LB{____\V{funnytree}_\V{ft};}} \L{\LB{}} \L{\LB{____\V{ft}_=_\V{Str}(\V{mkcasestring}(\S{}\"foo\"\SE{}));}} \L{\LB{____\V{ft}_=_\V{Cons}(\V{ft},_\V{ft});}} \L{\LB{____\V{ft}_=_\V{Cons}(\V{ft},_\V{ft});}} \L{\LB{____\V{io}_=_\V{CSGIOwrite\_funnytree}(\V{stdout},_\V{ft});}} \L{\LB{____\K{if}_(\V{io}_!=_(\K{char}_\*)0)}\Tab{32}{\V{printf}(\S{}\"\%s\2n\"\SE{},_\V{io});}} \L{\LB{\}}} kimwitu-doc-10a+1/csginmain.tex0000444000114400011300000000116007076562264016400 0ustar piefelsimulant% Remember to use the lgrind style \File{csginmain.c},{14:47},{Oct 27 1992} \L{\LB{\C{}\1\* an example of structure file i\1o \*\1\CE{}}} \L{\LB{\K{\#include}_\<\V{stdio}.\V{h}\>}} \L{\LB{\K{\#include}_\S{}\"csgiok.h\"\SE{}}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}}} \L{\LB{}} \L{\LB{\K{void}_\V{main}()_\{}} \L{\LB{____\K{char}_\*\V{io};}} \L{\LB{____\V{funnytree}_\V{ft};}} \L{\LB{}} \L{\LB{____\V{io}_=_\V{CSGIOread\_funnytree}(\V{stdin},_\&\V{ft});}} \L{\LB{____\K{if}_(\V{io}==_(\K{char}_\*)0)}\Tab{24}{\V{print\_funnytree}(\V{ft});}} \L{\LB{____\K{else}}\Tab{16}{\V{printf}(\S{}\"\%s\2n\"\SE{},_\V{io});}} \L{\LB{\}}} kimwitu-doc-10a+1/natsmain.tex0000444000114400011300000000063407076562264016247 0ustar piefelsimulant% Remember to use the lgrind style \File{natsmain.c},{15:05},{Feb 20 1997} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}}} \L{\LB{\K{\#include}_\S{}\"rk.h\"\SE{}}} \L{\LB{\V{nat}_\V{n2},_\V{n3};}} \L{\LB{}} \L{\LB{\K{void}_\V{main}()_\{}} \L{\LB{____\V{n2}_=_\V{s}(\V{s}(\V{zero}()));_\V{n3}_=_\V{s}(\V{n2});}} \L{\LB{____\V{print\_nat}(\V{rewrite\_nat}(\V{ack}(\V{n3},_\V{s}(\V{n3})),_\V{base\_rview}));}} \L{\LB{\}}} kimwitu-doc-10a+1/skimain.tex0000444000114400011300000000105607076562265016070 0ustar piefelsimulant% Remember to use the lgrind style \File{skimain.c},{15:06},{Feb 20 1997} \L{\LB{\C{}\1\* SKI expression reduction, main \*\1\CE{}}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}}} \L{\LB{\K{\#include}_\S{}\"rk.h\"\SE{}}} \L{\LB{\K{extern}_\V{exp}_\V{x};}} \L{\LB{}} \L{\LB{\K{void}_\V{main}()_\{}} \L{\LB{____\V{yyparse}();}} \L{\LB{____\V{print\_exp}(\V{x});}} \L{\LB{____\V{print\_exp}(\V{rewrite\_exp}(\V{x}),_\V{base\_rview});}} \L{\LB{\}}} \L{\LB{}} \L{\LB{\K{int}_\V{cplus}(\K{int}_\V{i},_\K{int}_\V{j})_\{}} \L{\LB{____\K{return}_\V{i}+\V{j};}} \L{\LB{\}}} kimwitu-doc-10a+1/yacc_input_example.tex0000444000114400011300000000127707076562265020313 0ustar piefelsimulant% Remember to use the lgrind style 
\File{yacc\_input\_example.y},{12:44},{Oct 26 1992} \L{\LB{\C{}\1\* Concrete syntax \*\1\CE{}}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}_}} \L{\LB{_}} \L{\LB{\V{funnytree}_\V{thetree};_}} \L{\LB{\%\}_}} \L{\LB{_}} \L{\LB{\%\V{token}_\<\V{yt\_casestring}\>_\V{ID}_}} \L{\LB{_}} \L{\LB{\%\V{type}_\<\V{yt\_funnytree}\>_\V{tree}_}} \L{\LB{\%\%_}} \L{\LB{_}} \L{\LB{\V{theroot}:}\Tab{8}{\V{tree}}\Tab{28}{\{_\V{thetree}_=_\V{\$1};\};_}} \L{\LB{_}} \L{\LB{\V{tree}:}\Tab{8}{\V{ID}_}\Tab{28}{\{_\V{\$\$}_=_\V{Str}(\V{\$1});\}_}} \L{\LB{\|}\Tab{8}{\S{}\'(\'\SE{}_\V{tree}_\V{tree}_\S{}\')\'\SE{}}\Tab{28}{\{_\V{\$\$}_=_\V{Cons}(\V{\$2},_\V{\$3});\}_}} \L{\LB{;}} kimwitu-doc-10a+1/skiy.tex0000444000114400011300000000251507076562265015415 0ustar piefelsimulant% Remember to use the lgrind style \File{skiy.y},{15:28},{Jul 19 1996} \L{\LB{\C{}\1\* yacc input SKI expression \*\1\CE{}}} \L{\LB{\%\{}} \L{\LB{\K{\#include}_\S{}\"k.h\"\SE{}}} \L{\LB{}} \L{\LB{\V{exp}_\V{x};}} \L{\LB{}} \L{\LB{\V{yyerror}(\V{char}_\*\V{s})_\{}} \L{\LB{____\V{printf}(\S{}\"\%s\2n\"\SE{},_\V{s});_\V{exit}(1);}} \L{\LB{\}}} \L{\LB{\%\}_}} \L{\LB{_}} \L{\LB{\%\V{token}_\<\V{yt\_int}\>_\V{NUM}}} \L{\LB{\C{}\1\* the next 2 lines force left associativity \*\1\CE{}}} \L{\LB{\%\V{left}_\S{}\'(\'\SE{}_\S{}\'i\'\SE{}_\S{}\'s\'\SE{}_\S{}\'k\'\SE{}_\S{}\'+\'\SE{}_\V{NUM}}} \L{\LB{\%\V{left}_\V{LEFT}}} \L{\LB{_}} \L{\LB{\%\V{type}_\<\V{yt\_exp}\>_\V{exp}_}} \L{\LB{\%\%_}} \L{\LB{}} \L{\LB{\V{theroot}:}\Tab{16}{\V{exp}}\Tab{40}{\{_\V{x}_=_\V{\$1};\};_}} \L{\LB{_}} \L{\LB{\V{exp}:}\Tab{16}{\S{}\'(\'\SE{}_\V{exp}_\S{}\')\'\SE{}_}\Tab{40}{\{_\V{\$\$}_=_\V{\$2};\}_}} \L{\LB{\|}\Tab{16}{\V{exp}_\V{exp}__\%\V{prec}_\V{LEFT}}\Tab{40}{\{_\V{\$\$}_=_\V{ap}(\V{\$1},_\V{\$2});\}}} \L{\LB{\|}\Tab{16}{\S{}\'i\'\SE{}}\Tab{40}{\{_\V{\$\$}_=_\V{I}();\}}} \L{\LB{\|}\Tab{16}{\S{}\'s\'\SE{}}\Tab{40}{\{_\V{\$\$}_=_\V{S}();\}}} \L{\LB{\|}\Tab{16}{\S{}\'k\'\SE{}}\Tab{40}{\{_\V{\$\$}_=_\V{K}();\}}} \L{\LB{\|}\Tab{16}{\V{NUM}}\Tab{40}{\{_\V{\$\$}_=_\V{num}(\V{\$1});\}}} \L{\LB{\|}\Tab{16}{\S{}\'+\'\SE{}}\Tab{40}{\{_\V{\$\$}_=_\V{plus}();\}}} \L{\LB{;}} kimwitu-doc-10a+1/kimwitu_syntax.tex0000444000114400011300000002052607076562266017540 0ustar piefelsimulant% Remember to use the lgrind style \File{kimwitu\_syntax.y},{16:58},{Feb 10 1998} \L{\LB{\V{specification}:}\Tab{24}{\{_\V{phylumdeclaration}_\|_\V{includedeclaration}_\|_\V{functiondeclaration}_\|}} \L{\LB{}\Tab{24}{__\V{rwdeclaration}_\|_\V{unparsedeclaration}_\|}} \L{\LB{}\Tab{24}{__\V{rviewdeclaration}_\|_\V{uviewdeclaration}_\|_\V{storageclassdeclaration}_\}_;}} \L{\LB{}} \L{\LB{\V{phylumdeclaration}:}\Tab{24}{\V{ID}_[\V{storage\_option}]_\S{}\':\'\SE{}_[\V{productionblock}]_[\V{Ccode}]_\S{}\';\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{storage\_option}:}\Tab{24}{\S{}\'\{\'\SE{}_[\S{}\'!\'\SE{}]_\V{ID}_\S{}\'\}\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{productionblock}:}\Tab{24}{\S{}\'list\'\SE{}_\V{ID}_\|_\V{alternative\_list}_;}} \L{\LB{}} \L{\LB{\V{alternative\_list}:}\Tab{24}{[\V{alternative\_list}_\S{}\'\|\'\SE{}]_\V{ID}_\S{}\'(\'\SE{}_\V{arguments}_\S{}\')\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{arguments}:}\Tab{24}{\{\V{ID}\}_;_}} \L{\LB{}} \L{\LB{\V{Ccode}:}\Tab{24}{\S{}\'\{\'\SE{}_[\V{Attributes}]_[\V{Cbody}]_\S{}\'\}\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{Attributes}:}\Tab{24}{[\V{Attributes}]_\V{ID}_\V{ID}_[\S{}\'=\'\SE{}_\V{Cexpression}]_\S{}\';\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{Cexpression}:}\Tab{24}{\C{}\1\* arbitrary C expression without \';\' and \',\' with \'\$0\' \*\1\CE{}}} \L{\LB{}} 
\L{\LB{\V{includedeclaration}:}\Tab{24}{\S{}\'\%\{\'\SE{}_\{\V{ID}\}_\V{includes}_\S{}\'\%\}\'\SE{}_;_\C{}\1\* the tokens are at the beginning of a line \*\1\CE{}}} \L{\LB{}\Tab{24}{\C{}\1\* ID\'s in \{ID\} are at the same line as \'\%\{\' \*\1\CE{}}} \L{\LB{}} \L{\LB{\V{includes}:}\Tab{24}{\C{}\1\* arbitrary text \*\1\CE{}_;}} \L{\LB{}} \L{\LB{\V{rwdeclaration}:}\Tab{24}{\V{outmostpatterns}_\S{}\'\-\!\>\'\SE{}_\V{rwclauses\_or\_term}_\S{}\';\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{rwclauses\_or\_term}:}\Tab{24}{\{\V{rewriteclause}\}_\|_\V{outmostterm}_;}} \L{\LB{}} \L{\LB{\V{rewriteclause}:}\Tab{24}{\S{}\'\<\'\SE{}_\V{rviewnames}_\S{}\':\'\SE{}_\V{outmostterm}_\S{}\'\>\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{rviewnames}:}\Tab{24}{\{\V{ID}\}_;}} \L{\LB{}} \L{\LB{\V{patternchains}:}\Tab{24}{\V{patternchain}_[\S{}\',\'\SE{}_\V{patternchains}]_;}} \L{\LB{}} \L{\LB{\V{patternchain}:}\Tab{24}{\V{patternchainitem}_[\S{}\'\&\'\SE{}_\V{patternchain}]_;}} \L{\LB{}} \L{\LB{\V{patternchainitem}:}\Tab{24}{\V{outmostpattern}}} \L{\LB{\|}\Tab{24}{\S{}\'(\'\SE{}_\V{patternchains}_\S{}\')\'\SE{}}} \L{\LB{\|}\Tab{24}{\S{}\'\$\'\SE{}_\V{ID}_\C{}\1\* this rule is to be used only in \'patternchain\' in foreach\_statement \*\1\CE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{outmostpatterns}:}\Tab{24}{\V{outmostpattern}_[\S{}\',\'\SE{}_\V{outmostpatterns}]_;}} \L{\LB{}} \L{\LB{\V{outmostpattern}:}\Tab{24}{[\V{ID}_\S{}\'=\'\SE{}]_\V{ID}_[\S{}\'(\'\SE{}_[\V{patterns}]_\S{}\')\'\SE{}]__\|_\S{}\'\*\'\SE{}_\|_\S{}\'default\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{patterns}:}\Tab{24}{\V{pattern}_[\S{}\',\'\SE{}_\V{patterns}]_;}} \L{\LB{}} \L{\LB{\V{pattern}:}\Tab{24}{\V{ID}_\S{}\'=\'\SE{}_\V{pattern}}} \L{\LB{\|}\Tab{24}{\V{ID}_[\S{}\'(\'\SE{}_[\V{patterns}]_\S{}\')\'\SE{}]}} \L{\LB{\|}\Tab{24}{\S{}\'\"\'\SE{}_\C{}\1\* any string of characters \*\1\CE{}_\S{}\'\"\'\SE{}}} \L{\LB{\|}\Tab{24}{\C{}\1\* a number \*\1\CE{}}} \L{\LB{\|}\Tab{24}{\S{}\'\*\'\SE{}_\|_\S{}\'default\'\SE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{outmostterm}:}\Tab{24}{\V{ID}_\S{}\'(\'\SE{}_[\V{terms}]_\S{}\')\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{term}:}\Tab{24}{\V{ID}_[\S{}\'(\'\SE{}_[\V{terms}]_\S{}\')\'\SE{}]_}} \L{\LB{\|}\Tab{24}{\S{}\'\"\'\SE{}_\C{}\1\* any string of characters \*\1\CE{}_\S{}\'\"\'\SE{}}} \L{\LB{\|}\Tab{24}{\C{}\1\* a number \*\1\CE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{terms}:}\Tab{24}{\V{term}_[\S{}\',\'\SE{}_\V{terms}]_;}} \L{\LB{}} \L{\LB{\V{functiondeclaration}:}\Tab{24}{\V{decl\_specifiers}_\V{fn\_declarator}_\{\V{declaration}\}_\V{MainCbody}_;}} \L{\LB{}} \L{\LB{\V{decl\_specifiers}:}\Tab{24}{[\V{stor\_class\_specifier}]_[\V{type\_qualifier}]_\V{ID}_;}} \L{\LB{}} \L{\LB{\V{stor\_class\_specifier}:}\Tab{24}{\S{}\'auto\'\SE{}_\|_\S{}\'register\'\SE{}_\|_\S{}\'static\'\SE{}_\|_\S{}\'extern\'\SE{}_\|_\S{}\'typedef\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{type\_qualifier}:}\Tab{24}{\S{}\'const\'\SE{}_\|_\S{}\'volatile\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{fn\_declarator}:}\Tab{24}{[\V{pointer}]_\V{direct\_fn\_declarator}_;}} \L{\LB{}} \L{\LB{\V{direct\_fn\_declarator}:}\Tab{24}{\V{ID}_\S{}\'(\'\SE{}_[\V{fnarguments}]_\S{}\')\'\SE{}}} \L{\LB{\|}\Tab{24}{\V{ID}_\S{}\'(\'\SE{}_\V{parameter\_type\_list}_\S{}\')\'\SE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{fnarguments}:}\Tab{24}{[\V{fnarguments}_\S{}\',\'\SE{}]_[\S{}\'\$\'\SE{}]_\V{ID}_;}} \L{\LB{}} \L{\LB{\V{parameter\_type\_list}:}\Tab{24}{\V{parameter\_list}_[\S{}\',\'\SE{}_\S{}\'.\,.\,.\'\SE{}]_;}} \L{\LB{}} \L{\LB{\V{parameter\_list}:}\Tab{24}{[\V{parameter\_list}_\S{}\',\'\SE{}]_\V{parameter\_decl}_;}} \L{\LB{}} 
\L{\LB{\V{parameter\_decl}:}\Tab{24}{\V{decl\_specifiers}_\V{declarator}}} \L{\LB{\|}\Tab{24}{\V{decl\_specifiers}_\V{abstract\_declarator}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{declarator}:}\Tab{24}{[\V{pointer}]_\V{direct\_declarator}_;}} \L{\LB{}} \L{\LB{\V{pointer}:}\Tab{24}{\S{}\'\*\'\SE{}_\{\V{type\_qualifier}\}_[\V{pointer}]_;}} \L{\LB{}} \L{\LB{\V{direct\_declarator}:}\Tab{24}{[\S{}\'\$\'\SE{}]_\V{ID}}} \L{\LB{\|}\Tab{24}{\S{}\'(\'\SE{}_\V{pointer}_\V{direct\_declarator}_\S{}\')\'\SE{}}} \L{\LB{\|}\Tab{24}{\V{direct\_declarator}_\S{}\'[\'\SE{}_[\V{Cexpression}]_\S{}\']\'\SE{}}} \L{\LB{\|}\Tab{24}{\V{direct\_declarator}_\S{}\'(\'\SE{}_\V{param\_type\_list}_\S{}\')\'\SE{}}} \L{\LB{\|}\Tab{24}{\V{direct\_declarator}_\S{}\'(\'\SE{}_[\V{fnarguments}]_\S{}\')\'\SE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{abstract\_declarator}:}\Tab{24}{\V{pointer}}} \L{\LB{\|}\Tab{24}{[\V{pointer}]_\V{direct\_abstract\_declarator}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{direct\_abstract\_declarator}:}\Tab{24}{\S{}\'(\'\SE{}_\V{abstract\_declarator}_\S{}\')\'\SE{}}} \L{\LB{\|}\Tab{24}{[\V{direct\_abstract\_declarator}]_\S{}\'[\'\SE{}_[\V{Cexpression}]_\S{}\']\'\SE{}}} \L{\LB{\|}\Tab{24}{[\V{direct\_abstract\_declarator}]_\S{}\'(\'\SE{}_[\V{parameter\_type\_list}]_\S{}\')\'\SE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{declaration}:}\Tab{24}{\V{decl\_specifiers}}\Tab{40}{[\V{declarator\_list}]_;}} \L{\LB{}} \L{\LB{\V{declarator\_list}:}\Tab{24}{[\V{declarator\_list}_\S{}\',\'\SE{}]_\V{declarator}_;}} \L{\LB{}} \L{\LB{\V{MainCbody}:}\Tab{24}{\V{Cbody}}} \L{\LB{\|}\Tab{24}{\S{}\'\{\'\SE{}_\V{with\_clause}_\S{}\'\}\'\SE{}_\C{}\1\* a with\_clause if a fn argument has a \$ prefix \*\1\CE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{Cbody}:}\Tab{24}{\S{}\'\{\'\SE{}_\V{Ctext}_\S{}\'\}\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{Ctext}:}\Tab{24}{\C{}\1\* arbitrary C text, with \$0 through \$n, }} \L{\LB{}\Tab{24}{___with\_statement and foreach\_statement \*\1\CE{}_;}} \L{\LB{}} \L{\LB{\V{foreach\_statement}:}\Tab{24}{\S{}\'foreach\'\SE{}_\S{}\'(\'\SE{}_\V{patternchain}_\S{}\';\'\SE{}_\V{IDCexpressions}_\S{}\')\'\SE{}_\V{MainCbody}}} \L{\LB{}\Tab{24}{[_\S{}\'afterforeach\'\SE{}_\S{}\'(\'\SE{}_\V{patternchain}_\S{}\')\'\SE{}_\V{MainCbodyinC}_]}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{IDCexpressions}:}\Tab{24}{\V{ID}_\V{Cexpression}_[\S{}\',\'\SE{}_\V{IDCexpressions}]_;}} \L{\LB{}} \L{\LB{\V{with\_statement}:}\Tab{24}{\S{}\'with\'\SE{}_\S{}\'(\'\SE{}_\V{Cexpressions}_\S{}\')\'\SE{}_\S{}\'\{\'\SE{}_\V{with\_clause}_\S{}\'\}\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{Cexpressions}:}\Tab{24}{\V{Cexpression}_[\S{}\',\'\SE{}_\V{Cexpressions}]_;}} \L{\LB{}} \L{\LB{\V{with\_clause}:}\Tab{24}{\V{patternchains}_\S{}\':\'\SE{}_\V{Cbody}_[\V{with\_clause}]_;}} \L{\LB{}} \L{\LB{\V{unparsedeclaration}:}\Tab{24}{\V{outmostpatterns}_\S{}\'\-\!\>\'\SE{}_\{\V{unparseclause}\}_\S{}\';\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{unparseclause}:}\Tab{24}{\S{}\'[\'\SE{}_\V{uviewnames}_\S{}\':\'\SE{}_\{\V{unparseitem}\}_\S{}\']\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{uviewnames}:}\Tab{24}{\{\V{ID}\}_;}} \L{\LB{}} \L{\LB{\V{unparseitem}:}\Tab{24}{\S{}\'\"\'\SE{}_\C{}\1\* any string of characters\*\1\CE{}_\S{}\'\"\'\SE{}__[\S{}\':\'\SE{}_\V{ID}]}} \L{\LB{\|}\Tab{24}{[\S{}\'(\'\SE{}_\V{ID}_\S{}\')\'\SE{}]__\V{ID}__\{\S{}\'\-\!\>\'\SE{}_\V{ID}\}__[\S{}\':\'\SE{}_\V{ID}]}} \L{\LB{\|}\Tab{24}{\V{Cbody}}} \L{\LB{\|}\Tab{24}{\S{}\'\$\{\'\SE{}_\{\V{unparseitem}\}_\S{}\'\$\}\'\SE{}}} \L{\LB{;}} \L{\LB{}} \L{\LB{\V{rviewdeclaration}:}\Tab{24}{\S{}\'\%rview\'\SE{}_\V{rviewnames}_\S{}\';\'\SE{}_;}} \L{\LB{}} 
\L{\LB{\V{uviewdeclaration}:}\Tab{24}{\S{}\'\%uview\'\SE{}_\V{uviewnames}_\S{}\';\'\SE{}_;}} \L{\LB{}} \L{\LB{\V{storageclassdeclaration}:}\Tab{24}{\S{}\'\%storageclass\'\SE{}_\{\V{ID}\}_\S{}\';\'\SE{}_;}} kimwitu-doc-10a+1/makefile.tex0000444000114400011300000000430007076562266016206 0ustar piefelsimulant% Remember to use the lgrind style \File{makefile.make},{14:14},{Oct 27 1992} \L{\LB{\C{}\V{\#} \1\* Makefile for the term processor \*\1}} \CE{}\L{\LB{\C{}\V{\#} \1\* 2 input .k-files plus yacc and lex usage. \*\1}} \CE{}\L{\LB{\V{IT}_=_\V{example}}} \L{\LB{\V{KFILES}_=_\V{file1}.\V{k}_\V{file2}.\V{k}}} \L{\LB{\V{YOURFILES}_=_\V{\$}\{\V{KFILES}\}_\V{\$}\{\V{IT}\}\V{y}.\V{y}_\V{\$}\{\V{IT}\}\V{l}.\V{l}_\V{\$}\{\V{IT}\}\V{main}.\V{c}}} \L{\LB{\V{ALLOBJS}_=_\V{k}.\V{o}_\V{rk}.\V{o}_\V{csgiok}.\V{o}_\V{unpk}.\V{o}\2}} \L{\LB{}\Tab{8}{\V{\$}\{\V{KFILES}:\V{k}=\V{o}\}_\V{\$}\{\V{IT}\}\V{y}.\V{o}_\V{\$}\{\V{IT}\}\V{l}.\V{o}_\V{\$}\{\V{IT}\}\V{main}.\V{o}}} \L{\LB{\V{GENERATED\_BY\_KC}_=_\V{k}.\V{c}_\V{rk}.\V{c}_\V{csgiok}.\V{c}_\V{unpk}.\V{c}_\V{\$}\{\V{KFILES}:\V{k}=\V{c}\}\2}} \L{\LB{}\Tab{8}{\V{k}.\V{h}_\V{rk}.\V{h}_\V{csgiok}.\V{h}_\V{unpk}.\V{h}_\V{\$}\{\V{KFILES}:\V{k}=\V{h}\}}} \L{\LB{\V{YFLAGS}_=_\-\V{d}}} \L{\LB{}} \L{\LB{\V{\$}\{\V{IT}\}:}\Tab{32}{\V{\$}\{\V{ALLOBJS}\}}} \L{\LB{}\Tab{8}{\V{\$}\{\V{CC}\}_\V{\$}\{\V{CFLAGS}\}_\V{\$}\{\V{ALLOBJS}\}_\-\V{ll}_\-\V{o}_\V{\$}@}} \L{\LB{}} \L{\LB{\V{\$}\{\V{GENERATED\_BY\_KC}\}:}\Tab{32}{\V{kctimestamp}}} \L{\LB{}} \L{\LB{\V{kctimestamp}:}\Tab{32}{\V{\$}\{\V{KFILES}\}}} \L{\LB{}\Tab{8}{\V{kc}_\V{\$}\{\V{KFILES}\};_\V{touch}_\V{kctimestamp}}} \L{\LB{}} \L{\LB{\V{\$}\{\V{ALLOBJS}\}:}\Tab{32}{\V{k}.\V{h}}} \L{\LB{\V{\$}\{\V{IT}\}\V{main}.\V{o}_\V{\$}\{\V{IT}\}\V{l}.\V{o}:_}\Tab{32}{\V{x}.\V{tab}.\V{h}}} \L{\LB{\V{\$}\{\V{IT}\}\V{main}.\V{o}_\V{\$}\{\V{KFILES}:\V{k}=\V{o}\}:_}\Tab{32}{\V{\$}\{\V{KFILES}:\V{k}=\V{h}\}}} \L{\LB{\V{\$}\{\V{IT}\}\V{main}.\V{o}_\V{rk}.\V{o}:_}\Tab{32}{\V{rk}.\V{h}}} \L{\LB{\V{\$}\{\V{IT}\}\V{main}.\V{o}_\V{csgiok}.\V{o}:_}\Tab{32}{\V{csgiok}.\V{h}}} \L{\LB{\V{\$}\{\V{IT}\}\V{main}.\V{o}_\V{unpk}.\V{o}:_}\Tab{32}{\V{unpk}.\V{h}}} \L{\LB{}} \L{\LB{\C{}\V{\#} \1\* making copies to prevent unnecessary recompilation after yacc run \*\1}} \CE{}\L{\LB{\V{x}.\V{tab}.\V{h}:}\Tab{32}{\V{y}.\V{tab}.\V{h}}} \L{\LB{}\Tab{8}{\-\V{cmp}_\-\V{s}_\V{x}.\V{tab}.\V{h}_\V{y}.\V{tab}.\V{h}_\|\,\|_\V{cp}_\V{y}.\V{tab}.\V{h}_\V{x}.\V{tab}.\V{h}}} \L{\LB{}} \L{\LB{\C{}\V{\#} \1\* if you clean up, don\'t forget to remove the file kctimestamp \*\1}} \CE{}