minlog-4.0.99.20080304/0000755000175000017500000000000011340243353013267 5ustar barralbarralminlog-4.0.99.20080304/minlog-mode.el0000644000175000017500000002726610537561604016046 0ustar barralbarral ;;; minlog-mode.el - Minlog minor mode ;; ;; Copyright (C) 2006 Stefan Schimanski ;; This file is not part of GNU Emacs. ;; GNU Emacs is free software; you can redistribute it and/or modify ;; it under the terms of the GNU General Public License as published by ;; the Free Software Foundation; either version 2, or (at your option) ;; any later version. ;; GNU Emacs is distributed in the hope that it will be useful, ;; but WITHOUT ANY WARRANTY; without even the implied warranty of ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ;; GNU General Public License for more details. ;; You should have received a copy of the GNU General Public License ;; along with GNU Emacs; see the file COPYING. If not, write to the ;; Free Software Foundation, Inc., 59 Temple Place - Suite 330, ;; Boston, MA 02111-1307, USA. ;; ;;; Install: ;; ;; (require 'minlog-mode) ;;; Usage: ;; ;; A new minor mode minlog-font-lock-mode is defined. You can toggle ;; it by calling minlog-font-lock-mode. ;;; Code: (require 'easy-mmode) (defgroup minlog nil "Minlog minor mode" :group 'tools) (defgroup minlog-faces nil "Minlog minor mode faces" :group 'minlog) (defface minlog-default-face '((((class color)) (:background "grey95"))) "Face for a whole formula, term or type. By default it just sets the background to a light blue" :group 'minlog-faces) (defgroup minlog-formula-faces nil "Faces to color elements inside of formulas, terms and types" :group 'minlog-faces) (defface minlog-all-face '((((class color)) (:weight bold :foreground "brown" :height 1.5))) "Face for the all quantifier" :group 'minlog-formula-faces) (defface minlog-ex-face '((((class color)) (:weight bold :foreground "brown" :height 1.5))) "Face for minlog ex quantifier" :group 'minlog-formula-faces) (defface minlog-allnc-face '((((class color)) (:weight bold :foreground "sandy brown" :height 1.5))) "Face for the allnc quantifier" :group 'minlog-formula-faces) (defface minlog-exnc-face '((((class color)) (:weight bold :foreground "sandy brown" :height 1.5))) "Face for minlog exnc quantifier" :group 'minlog-formula-faces) (defface minlog-exc-face '((((class color)) (:weight bold :foreground "red" :height 1.5))) "Face for minlog exc quantifier" :group 'minlog-formula-faces) (defface minlog-paren-face '((((class color)) (:foreground "dark orange" :weight bold))) "Face for parentheses ( )" :group 'minlog-formula-faces) (defface minlog-dot-face '((((class color)) (:foreground "dark orange" :weight bold))) "Face for the dot \".\" in formulas" :group 'minlog-formula-faces) (defface minlog-type-arrow-face '((((class color)) (:foreground "dark green" :weight bold))) "Face for => arrow in types" :group 'minlog-formula-faces) (defface minlog-implication-arrow-face '((((class color)) (:foreground "black" :weight bold))) "Face for -> arrow in formulas" :group 'minlog-formula-faces) (defface minlog-variable-id-face '((((class color)) (:foreground "blue"))) "Face for variable identifiers" :group 'minlog-formula-faces) (defface minlog-type-id-face '((((class color)) (:foreground "dark green"))) "Face for type identifiers" :group 'minlog-formula-faces) (defgroup minlog-output-faces nil "Faces to color elements of the output, i.e. 
inside of the *scheme* buffer" :group 'minlog-faces) (defface minlog-output-formula-face '((((class color)) (:background "grey95"))) "Face for a whole formula in the scheme output" :group 'minlog-output-faces) (defface minlog-output-semicolon-face '((((class color)) (:foreground "red"))) "Face for the ; at the beginning of a line in the scheme output" :group 'minlog-output-faces) (defface minlog-output-goal-face '((((class color)) (:foreground "black" :weight bold))) "Face for the goal identifier" :group 'minlog-output-faces) (defface minlog-output-assumption-face '((((class color)) (:foreground "black" :weight bold))) "Face for the assumption identifier" :group 'minlog-output-faces) (defface minlog-output-sorry-face '((((class color)) (:foreground "red" :weight bold :height 1.5))) "Face for lines lie \"Minlog sorry\"" :group 'minlog-output-faces) (defface minlog-output-ok-face '((((class color)) (:foreground "green" :weight bold))) "Face for lines like \"ok, ...\"" :group 'minlog-output-faces) (defface minlog-output-proof-finished-face '((((class color)) (:foreground "green" :weight bold :height 2.0))) "Face the \"Proof finished.\" line" :group 'minlog-output-faces) (defface minlog-output-coq-line '((((class color)) (:foreground "black" :weight bold))) "Face for the line between assumptions and goal in coq output mode" :group 'minlog-output-faces) (setq minlog-parse-prefix "\\(?:([ \t]*p[ytf]\\|parse-\\(?:type\\|formula\\|term\\)\\)[ \t]+\"") (defvar minlog-font-lock-keywords (let ((formula-keywords (lambda (pre post) (list `("\\b\\([a-zA-Z]+\\(?:_?[0-9]+\\)?\\)\\b" ,pre ,post (1 'minlog-variable-id-face prepend)) `("\\bold\\(nat\\|rat\\|real\\|pos\\|int\\|list\\|tsil\\)\\b" ,pre ,post (1 'minlog-type-id-face prepend)) `("\\ball\\b" ,pre ,post (0 'minlog-all-face prepend)) `("\\ballnc\\b" ,pre ,post (0 'minlog-allnc-face prepend)) `("\\bex\\b" ,pre ,post (0 'minlog-ex-face prepend)) `("\\bexnc\\b" ,pre ,post (0 'minlog-exnc-face prepend)) `("\\bexc\\b" ,pre ,post (0 'minlog-exc-face prepend)) `("[()]" ,pre ,post (0 'minlog-paren-face prepend)) `("\\." ,pre ,post (0 'minlog-dot-face prepend)) `("=>" ,pre ,post (0 'minlog-type-arrow-face prepend)) `("->" ,pre ,post (0 'minlog-implication-arrow-face prepend))))) (search-for-pre (lambda (start limit-regexp) `(progn ,start (let ((limit (min (+ (point) 10000) (point-max)))) (save-match-data (if (save-excursion (search-forward ,limit-regexp limit t)) (match-beginning 0) limit)))) )) (prompt "^\\(?:\\(?:guile\\)?>[ \t]*\\)*")) (list ;; ;; formulas in assumptions and goals ;; (append ;; (list ;; (lambda (limit) ;; (let ( ;; ; some ">" followed by ";", then either >=3 spaces and "foo bar:" or "?_foo", finally : ;; (start "^\\(?:\\(?:guile\\)?>[ \t]+\\)*;\\(?: \\?[^ \t:\n]*\\| [^ \t\n:][^\t:\n]*\\):[ \t]*") ;; ; follow ups of started formulas on new lines: >=5 leading spaces ;; (indented-line "; \\{5,\\}") ;; ; any text, followed by \n and >=5 spaces, repeated; finally one line ;; (body "\\(\\(?:.*\n; \\{5,\\}.*\\)*.*?\\)") ;; ; ending with $ or " from" ;; (end "\\(?: from\\)?$")) ;; ; are we on a start line or followup? 
;; (if (save-excursion ;; (beginning-of-line) ;; (or (looking-at indented-line) ;; (looking-at start))) ;; (let ((start-point (point))) ;; ; yes => look backwards until we are on a start line ;; (beginning-of-line) ;; (while (and (looking-at indented-line) ;; (not (looking-at start)) ;; (= (forward-line -1) 0) ;; (> (point) (point-min))) nil) ;; ; look for assumption or goal block ;; (if (re-search-forward (concat start body end) (min (point-max) (+ (point) 10000)) t) ;; ; fount => does it end after our original position? ;; (if (> (point) start-point) ;; ; yes => we found a formula to highlight ;; t ;; ; no => the formula found stops before are original ;; ; position, i.e. it should have been highlighted already ;; ; Hence, go back there and look forward ;; (progn (goto-char start-point) ;; (re-search-forward (concat start body end) (min (point-max) (+ (point) 10000)) t))) ;; ; no => nothing to be found until limit ;; (progn (goto-char limit) nil))) ;; ; no => look forward ;; (re-search-forward (concat start body end) (min (point-max) (+ (point) 10000)) t)))) ;; '(1 'minlog-output-formula-face t)) ;; (funcall formula-keywords ;; '(progn (goto-char (match-beginning 1)) (match-end 1)) ;; '(progn (goto-char (match-end 0))))) ;; assumption (append (list (concat prompt "; \\([^? \t][^\n:]*:\\)\\(.*\\)$") '(1 'minlog-output-assumption-face prepend) '(2 'minlog-output-formula-face prepend)) (funcall formula-keywords '(goto-char (match-beginning 2)) 'nil)) ;; goals (append (list (concat prompt "; \\(\\?_[0-9]+:\\)\\(.*?\\)\\( from$\\|$\\)") '(1 'minlog-output-goal-face prepend) '(2 'minlog-output-formula-face prepend)) (funcall formula-keywords '(goto-char (match-beginning 2)) 'nil)) ;; indented formula (append (list (concat prompt "; \\{5,\\}\\(.*\\)$") '(1 'minlog-output-formula-face prepend)) (funcall formula-keywords '(goto-char (match-beginning 1)) 'nil)) ;; lines starting with some > and then a ; (let ((pre '(goto-char (match-beginning 1))) (post '(goto-char (match-end 0)))) (list (concat prompt "\\(;\\) .*$") ;; the ; '(1 'minlog-output-semicolon-face prepend) ;; assumptions ; `("; \\([^? \t][^\n:]*:\\)" ,pre ,post (1 'minlog-output-assumption-face prepend)) ;; goals ; `("; \\(\\?_[0-9]+:\\)" ,pre ,post (1 'minlog-output-goal-face prepend)) ;; indented formula ; `("; \\{5,\\}\\(.*\\)$" ,pre ,post (1 'minlog-default-face prepend)) ;; the magic sentence you are after `("Proof finished\\." ,pre ,post (0 'minlog-output-proof-finished-face prepend)) ;; ok, .... 
`("; \\(ok\\)," ,pre ,post (1 'minlog-output-ok-face prepend)) ;; Minlog "sorry" `("\\(Minlog \"sorry\"\\)" ,pre ,post (1 'minlog-output-sorry-face prepend)) ;; lines from the coq output `("; \\(-----*\\) \\(\\?_[0-9]+\\)$" ,pre ,post (1 'minlog-output-coq-line prepend) (2 'minlog-output-goal-face prepend)) )) ;; expression like (pf "formula"), (pt "term"), (py "type") (append (list (concat minlog-parse-prefix "\\([^\"]*\\)\\(?:\"\\|$\\)") '(1 'minlog-default-face t)) (funcall formula-keywords (funcall search-for-pre '(goto-char (match-beginning 1)) "\"") nil)))) "Faces to highlight minlog formulas") ;(makunbound 'minlog-font-lock-keywords) (easy-mmode-define-minor-mode minlog-font-lock-mode "Highlight minlog formulas" nil " MinlogFontLock") (defun minlog-font-lock-mode-on () (font-lock-add-keywords nil minlog-font-lock-keywords) (setq font-lock-multiline t) (when font-lock-fontified (font-lock-fontify-buffer))) (defun minlog-font-lock-mode-off () (font-lock-remove-keywords nil minlog-font-lock-keywords) (when font-lock-fontified (font-lock-fontify-buffer))) (add-hook 'minlog-font-lock-mode-on-hook 'minlog-font-lock-mode-on) (add-hook 'minlog-font-lock-mode-off-hook 'minlog-font-lock-mode-off) (provide 'minlog-mode) minlog-4.0.99.20080304/Makefile0000644000175000017500000000616010763221121014727 0ustar barralbarral## $Id: Makefile,v 1.34 2008/02/08 10:20:09 logik Exp $ # There are several possibilities to install Minlog: # INSTALLATION IN CURRENT DIRECTORY # > make # INSTALLATION IN LOCAL DIRECTORY (i.e. /usr/local) # > make install # INSTALLATION IN SYSTEMS DIRECTORIES (i.e. /usr) # make install DESTDIR=/usr INSTALL=install INSTALL_FILE=$(INSTALL) -D -p -m 644 INSTALL_DIR=$(INSTALL) -p -d -m 755 DESTDIR=/usr/local PREFIX= MINLOGDIR=$(DESTDIR)/share/minlog DOCDIR=$(DESTDIR)/share/doc/minlog BINDIR=$(DESTDIR)/bin ELDIR=$(DESTDIR)/share/emacs/site-lisp/minlog ## Makefile for Minlog. As most of the minlog project are just scheme sources ## there's not much to be done. However it's nice to have some form of ## interface for it. ## The semantics of all the Makefiles used is the same: ## -- .dep is the file that signals, when the dependencies have been ## checked for the last time. This file is always younger ## than the last check of all dependencies in all subdirectories ## -- clean the target used to clean up the source. Should also remove ## all the .dep files. all: src init.scm mpc minlog minlog.el doc install: src init.scm mpc minlog minlog.el doc $(INSTALL_DIR) $(PREFIX)$(MINLOGDIR) $(PREFIX)$(BINDIR) $(PREFIX)$(ELDIR) sed "s%---MINLOGPATH---%"$(MINLOGDIR)"%g; s%---MINLOGELPATH---%"$(ELDIR)"%g" < src/minlog.el > $(PREFIX)$(ELDIR)/minlog.el $(INSTALL_FILE) minlog-mode.el $(PREFIX)$(ELDIR)/minlog-mode.el sed "s%---MINLOGPATH---%"$(ELDIR)"%g" < src/minlog > $(PREFIX)$(BINDIR)/minlog chmod a+x $(PREFIX)$(BINDIR)/minlog sed "s%---MINLOGPATH---%"$(MINLOGDIR)"%g; s%(minlog-load \"examples/\" path))%(load (string-append \""$(DOCDIR)"/examples/\" path)))%g" < src/init.scm > $(PREFIX)$(MINLOGDIR)/init.scm (cd src; find . -name '*.scm' -type f -exec $(INSTALL_FILE) {} $(PREFIX)$(MINLOGDIR)/src/{} \;) (cd lib; find . -name '*.scm' -type f -exec $(INSTALL_FILE) {} $(PREFIX)$(MINLOGDIR)/lib/{} \;) (cd modules; find . -name '*.scm' -type f -exec $(INSTALL_FILE) {} $(PREFIX)$(MINLOGDIR)/modules/{} \;) (cd examples; find . -type f -a ! -path "*/CVS/*" -a ! -name ".cvsignore" -exec $(INSTALL_FILE) {} $(PREFIX)$(DOCDIR)/examples/{} \;) (cd doc; find . 
-name '*.pdf' -type f -exec $(INSTALL_FILE) {} $(PREFIX)$(DOCDIR)/{} \;) minlog.el: src/minlog.el src sed "s%---MINLOGPATH---%`pwd`%g; s%---MINLOGELPATH---%`pwd`%g" < src/minlog.el > minlog.el minlog: src/minlog src sed "s%---MINLOGPATH---%`pwd`%g" < src/minlog > minlog chmod a+x minlog init.scm: src/init.scm src sed "s%---MINLOGPATH---%`pwd`%g" < src/init.scm > init.scm mpc: src/mpc init.scm sed "s%---MINLOGPATH---%`pwd`%g" < src/mpc > mpc chmod a+x mpc doc: doc/.dep doc/.dep: (cd doc; $(MAKE) .dep) src: src/.dep.notags src/.dep.notags: (cd src; $(MAKE) .dep.notags) test: examples/.TEST newtest: (cd examples; $(MAKE) clean) ($(MAKE) test) alltest: (cd examples; $(MAKE) clean) ($(MAKE) -k -i test) examples/.TEST: init.scm (cd examples; $(MAKE) .TEST) clean: rm -rf *~ rm -rf init.scm mpc minlog minlog.el welcome.scm (cd src; $(MAKE) clean) (cd doc; $(MAKE) clean) (cd examples; $(MAKE) clean) # git demo # git demo 3 minlog-4.0.99.20080304/build.log0000644000175000017500000002517511340061020015070 0ustar barralbarraldpkg-buildpackage: setze CFLAGS auf Standardwert: -g -O2 dpkg-buildpackage: setze CPPFLAGS auf Standardwert: dpkg-buildpackage: setze LDFLAGS auf Standardwert: -Wl,-Bsymbolic-functions dpkg-buildpackage: setze FFLAGS auf Standardwert: -g -O2 dpkg-buildpackage: setze CXXFLAGS auf Standardwert: -g -O2 dpkg-buildpackage: Quellpaket minlog dpkg-buildpackage: Quellversion 4.0.99.20080304-4.2 dpkg-buildpackage: Quellen geändert durch Freiric Barral dpkg-buildpackage: Host-Architektur i386 dh_testdir dh_testroot rm -f build-stamp configure-stamp # Add here commands to clean up after the build process. /usr/bin/make clean make[1]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304' rm -rf *~ rm -rf init.scm mpc minlog minlog.el welcome.scm (cd src; /usr/bin/make clean) make[2]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/src' rm -rf .dep .dep.* rm -rf TAGS rm -rf *~ *% rm -rf grammar.log make[2]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/src' (cd doc; /usr/bin/make clean) make[2]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/doc' rm -rf *.aux *.log *.blg *.bbl *.idx *.toc *.ind *.ilg *.brf *.out rm -rf .dep .dep.* rm -rf *.dvi *.pdf *.ps rm -rf *~ *% ls -sh insgesamt 756K 4,0K acknow.tex 28K bussproofs.sty 8,0K infrule.sty 4,0K Makefile 4,0K manual.txt 16K minlog.bib 20K minlog.mac 324K mlcf.tex 32K mpcref.tex 16K notation.sty 12K reflection_manual.tex 220K ref.tex 68K tutor.tex make[2]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/doc' (cd examples; /usr/bin/make clean) make[2]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed (cd classical; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/classical' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/classical' (cd hounif; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/hounif' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis 
'/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/hounif' (cd prop; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/prop' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/prop' (cd quant; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/quant' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/quant' (cd warshall; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/warshall' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/warshall' (cd dijkstra; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/dijkstra' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/dijkstra' (cd bar; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/bar' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/bar' (cd dc; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/dc' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/dc' (cd arith; /usr/bin/make clean) make[3]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/arith' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed (cd quotrem; /usr/bin/make clean) make[4]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/arith/quotrem' rm -f *~ core *.diff *.out .TEST *.nodigits .*.test-passed make[4]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/arith/quotrem' make[3]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples/arith' make[2]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/examples' make[1]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304' dh_clean dpkg-source: Information: verwende Quellformat »1.0« dpkg-source: Information: baue minlog unter Benutzung des existierenden minlog_4.0.99.20080304.orig.tar.gz dpkg-source: Information: baue minlog in minlog_4.0.99.20080304-4.2.diff.gz dpkg-source: Information: baue minlog in minlog_4.0.99.20080304-4.2.dsc dh_testdir # Add here commands to configure the package. touch configure-stamp dh_testdir # Add here commands to compile the package. 
/usr/bin/make DESTDIR=/usr make[1]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304' (cd src; /usr/bin/make .dep.notags) make[2]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/src' make[2]: Für das Ziel ».dep.notags« ist nichts zu tun. make[2]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/src' sed "s%---MINLOGPATH---%`pwd`%g" < src/init.scm > init.scm sed "s%---MINLOGPATH---%`pwd`%g" < src/mpc > mpc chmod a+x mpc sed "s%---MINLOGPATH---%`pwd`%g" < src/minlog > minlog chmod a+x minlog sed "s%---MINLOGPATH---%`pwd`%g; s%---MINLOGELPATH---%`pwd`%g" < src/minlog.el > minlog.el (cd doc; /usr/bin/make .dep) make[2]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/doc' pdflatex mlcf >> /dev/null bibtex -terse mlcf makeindex -q mlcf pdflatex mlcf >> /dev/null pdflatex mlcf >> /dev/null pdflatex mpcref.tex >> /dev/null pdflatex mpcref.tex >> /dev/null pdflatex ref >> /dev/null bibtex -terse ref makeindex -q ref pdflatex ref >> /dev/null pdflatex ref >> /dev/null pdflatex reflection_manual.tex >> /dev/null pdflatex reflection_manual.tex >> /dev/null pdflatex tutor >> /dev/null bibtex -terse tutor pdflatex tutor >> /dev/null rm -rf *.aux *.log *.blg *.bbl *.idx *.toc *.ind *.ilg *.brf *.out touch .dep make[2]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/doc' make[1]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304' #docbook-to-man debian/minlog.sgml > minlog.1 touch build-stamp dh_testdir dh_testroot dh_clean -k dh_installdirs # Add here commands to install the package into debian/minlog. /usr/bin/make install DESTDIR=/usr PREFIX=/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog make[1]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304' (cd src; /usr/bin/make .dep.notags) make[2]: Betrete Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/src' make[2]: Für das Ziel ».dep.notags« ist nichts zu tun. 
make[2]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/src' install -p -d -m 755 /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/minlog /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/bin /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/emacs/site-lisp/minlog sed "s%---MINLOGPATH---%"/usr/share/minlog"%g; s%---MINLOGELPATH---%"/usr/share/emacs/site-lisp/minlog"%g" < src/minlog.el > /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/emacs/site-lisp/minlog/minlog.el install -D -p -m 644 minlog-mode.el /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/emacs/site-lisp/minlog/minlog-mode.el sed "s%---MINLOGPATH---%"/usr/share/emacs/site-lisp/minlog"%g" < src/minlog > /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/bin/minlog chmod a+x /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/bin/minlog sed "s%---MINLOGPATH---%"/usr/share/minlog"%g; s%(minlog-load \"examples/\" path))%(load (string-append \""/usr/share/doc/minlog"/examples/\" path)))%g" < src/init.scm > /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/minlog/init.scm (cd src; find . -name '*.scm' -type f -exec install -D -p -m 644 {} /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/minlog/src/{} \;) (cd lib; find . -name '*.scm' -type f -exec install -D -p -m 644 {} /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/minlog/lib/{} \;) (cd modules; find . -name '*.scm' -type f -exec install -D -p -m 644 {} /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/minlog/modules/{} \;) (cd examples; find . -type f -a ! -path "*/CVS/*" -a ! -name ".cvsignore" -exec install -D -p -m 644 {} /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/doc/minlog/examples/{} \;) (cd doc; find . -name '*.pdf' -type f -exec install -D -p -m 644 {} /home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304/debian/minlog/usr/share/doc/minlog/{} \;) make[1]: Verlasse Verzeichnis '/home/barral/Documents/mathMuenchen/minlogdeb/minlog-4.0.99.20080304' dh_testdir dh_testroot dh_installchangelogs dh_installdocs dh_installexamples dh_installman debian/minlog.1 dh_link dh_strip dh_compress -Xminlog/examples dh_fixperms dh_installdeb dh_shlibdeps dh_gencontrol dh_md5sums dh_builddeb dpkg-deb: Baue Paket »minlog« in »../minlog_4.0.99.20080304-4.2_all.deb«. dpkg-buildpackage: Binär und Diff hochzuladen (Originalquellen NICHT enthalten) minlog-4.0.99.20080304/misc/0000755000175000017500000000000010746604554014237 5ustar barralbarralminlog-4.0.99.20080304/misc/norm.scm0000644000175000017500000001007210746604554015716 0ustar barralbarral;; $Id: norm.scm,v 1.2 2008/01/25 13:30:30 logik Exp $ ;; This is a ``toy implementation'' of the special reduction strategie ;; of [1] ;; TODO/BUGS ;; ========= ;; * Handle input/output ;; * define constructor functions for every type... ;; ------------------------------------------ ;; As normalisation only has to care about applications (abstractions and ;; constants *are* normal), the only thing we require at the interface ;; are applications. ;; There are ``free'' applications and special ones. 
For free applications ;; the require a test function, a constructor and selection functions ;; for left and right part. ;; For special applications we require a moreover a selection function ;; for the kind of application. (define (make-free-app left right) (list 'freeapp left right)) (define (free-app? obj) (cond ((null? obj) #f) ((list? obj) (equal? (car obj) 'freeapp)) (#t #f))) (define (free-app-left obj) (cadr obj)) (define (free-app-right obj) (caddr obj)) (define (make-app type left right) (list 'app type left right)) (define (app? obj) (cond ((null? obj) #f) ((list? obj) (equal? (car obj) 'app)) (#t #f))) (define (app-type obj) (cadr obj)) (define (app-left obj) (caddr obj)) (define (app-right obj) (cadddr obj)) ;; ------------------------------------------ ;; For every type we have to define a function returning the ``canonical ;; reduct'' and, to make the interface look nicer also constructor functions ;; Lists ;; ----- ;;; constructor symbols: l-cons l-nil ;;; elimination term: ('l-elim step initial) (define (list-can-red left right) (cond ((not (equal? (car right) 'l-elim)) (error "Illegal list application")) ((equal? left 'l-nil) (caddr right)) ;; ;; If it's not nil, than it has to be something of the form: ;; (freeapp (freeapp 'l-cons a) l) ;; ;; TODO: we don't test it, we just asume it's correct (#t (lambda-apply (lambda-apply (cadr right)(free-app-right (free-app-left left))) (list-can-red (free-app-right left) right))))) ;; Lambda terms ;; ------------ ;;; Sytax ('lambda-abs term) ;;;;; Variables as ('lambda-var n) ;;; elimination term: ('lambda-elim s) (define (lambda-apply left right) (make-app 'lambda left (list 'lambda-elim right))) (define (lambda-can-red left right) (cond ((not (equal? (car right) 'lambda-elim)) (error "Illegal lambda application")) (#t (lambda-substitute 1 (cadr left) (cadr right))))) (define (lambda-substitute n left right) (cond ((not (list? left)) left) ((null? left) left) ((equal? (car left) 'lambda-abs) (map (lambda (left) (lambda-substitute (+ n 1) left right)) left)) ((equal? (car left) 'lambda-var) (if (= (cadr left) n) right left)) (#t (map (lambda (left) (lambda-substitute n left right)) left)))) ;; ------------------------------------------ ;; Finally we collect all these canonical reductions to a single function (define (can-red type left right) (cond ((equal? type 'list)(list-can-red left right)) ((equal? type 'lambda)(lambda-can-red left right)) (#t (error "Unknown application")))) ;; ------------------------------------------ ;; And here is the normalisaiton strategie of [1] (define (normalize term) (cond ((free-app? term) (make-free-app (normalize (free-app-left term)) (normalize (free-app-right term)))) ((app? term)(normalize (can-red (app-type term) (normalize (app-left term)) (app-right term)))) (#t term) ;; everything that is not an application or a free application] ;; is normal... )) ;; ------------------------------------------ ;; examples... (define list-1 '(freeapp (freeapp l-cons A) (freeapp (freeapp l-cons B) l-nil))) (define lambda-term-1 '(lambda-abs (lambda-abs (freeapp (lambda-var 2) (lambda-var 1))))) (define term-1 (list 'app 'list list-1 (list 'l-elim lambda-term-1 '*nil*))) ;(trace normalize) (normalize term-1) ;; ------------------------------------------ ;; References: ;; =========== ;; [1] Aehlig, Schwichtenberg. 
``A syntactical analysis of non-sizeincreasing ;; polynomial time computation'', LICS00 minlog-4.0.99.20080304/debian/0000755000175000017500000000000011340243353014511 5ustar barralbarralminlog-4.0.99.20080304/debian/changelog0000644000175000017500000000256011340243302016360 0ustar barralbarralminlog (4.0.99.20100221-5) unstable; urgency=low (high for users of mzsccheme) * Closes: #570235 due to incompatibility between mzscheme and r5rs * upate to svn head -- Freiric Barral Sat, 20 Feb 2010 17:55:50 +0100 minlog (4.0.99.20080304-4.1) unstable; urgency=low * Non-maintainer upload. * add ${misc:Depends} * Change Build dependency fronm tetex to texlive (Closes: #562301) -- Christoph Egger Sat, 13 Feb 2010 00:17:04 +0100 minlog (4.0.99.20080304-4) unstable; urgency=low * CVS snapshot -- Freiric Barral Tue, 04 Mar 2008 11:21:04 +0100 minlog (4.0.99-4) unstable; urgency=low * Added missing copyright notices. * Initial upload closes: #406186 -- Freiric Barral Fri, 12 Oct 2007 13:55:13 +0100 minlog (4.0.99-3) unstable; urgency=low * XEmacs does not work with Minlog. So recommend either emacs21 or emacs22 (though the latter is not available yet). -- Freiric Barral Wed, 3 Nov 2006 19:28:10 +0100 minlog (4.0.99-2) unstable; urgency=low * Use debhelper v4 and therefore put everything into debian/minlog -- Freiric Barral Wed, 2 Nov 2006 19:27:10 +0100 minlog (4.0.99-1) unstable; urgency=low * Initial release -- Freiric Barral Wed, 1 Nov 2006 19:26:10 +0100 minlog-4.0.99.20080304/debian/rules0000755000175000017500000000333211340011070015557 0ustar barralbarral#!/usr/bin/make -f # -*- makefile -*- # Sample debian/rules that uses debhelper. # This file was originally written by Joey Hess and Craig Small. # As a special exception, when this file is copied by dh-make into a # dh-make output file, you may use that output file without restriction. # This special exception was added by Craig Small in version 0.37 of dh-make. # Uncomment this to turn on verbose mode. #export DH_VERBOSE=1 CFLAGS = -Wall -g ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS))) CFLAGS += -O0 else CFLAGS += -O2 endif configure: configure-stamp configure-stamp: dh_testdir # Add here commands to configure the package. touch configure-stamp build: build-stamp build-stamp: configure-stamp dh_testdir # Add here commands to compile the package. $(MAKE) DESTDIR=/usr #docbook-to-man debian/minlog.sgml > minlog.1 touch build-stamp clean: dh_testdir dh_testroot rm -f build-stamp configure-stamp # Add here commands to clean up after the build process. $(MAKE) clean dh_clean install: build dh_testdir dh_testroot dh_clean -k dh_installdirs # Add here commands to install the package into debian/minlog. $(MAKE) install DESTDIR=/usr PREFIX=$(CURDIR)/debian/minlog # Build architecture-independent files here. binary-indep: build install dh_testdir dh_testroot dh_installchangelogs dh_installdocs dh_installexamples dh_installman debian/minlog.1 dh_link dh_strip dh_compress -Xminlog/examples dh_fixperms dh_installdeb dh_shlibdeps dh_gencontrol dh_md5sums dh_builddeb # Build architecture-dependent files here. binary-arch: build install # We have nothing to do by default. binary: binary-indep binary-arch .PHONY: build clean binary-indep binary-arch binary install configure minlog-4.0.99.20080304/debian/copyright0000644000175000017500000000643711340011070016443 0ustar barralbarralThis package was debianized by Freiric Barral on Wed, 1 Nov 2006 19:26:10 +0100. 
It was downloaded from http://www.minlog-system.de/ Here is a list of files followed by their respective copyright holder and license: logical.scm: bit access and operations for integers for Scheme Copyright (C) 1991, 1993, 2001, 2003, 2005 Aubrey Jaffer Permission to copy this software, to modify it, to redistribute it, to distribute modified versions, and to use it for any purpose is granted, subject to the following restrictions and understandings. 1. Any copy made of this software must include this copyright notice in full. 2. I have made no warranty or representation that the operation of this software will be error-free, and I am under no obligation to rovide any services, by way of maintenance, update, or otherwise. 3. In conjunction with products arising from the use of this material, there shall be no use of my name in any advertising, promotional, or sales literature without prior written consent in each case. pp-sexp.scm (modification of genwrite.scm): generic write used by pretty-print and truncated-print. Copyright (c) 1991, Marc Feeley Author: Marc Feeley (feeley@iro.umontreal.ca) Distribution restrictions: none Modified for Minlog by Stefan Schimanski Taken from slib 2d6, genwrite.scm and pp.scm lr-dvr.scm, lalr.scm: Copyright (C) 1984, 1989, 1990 Free Software Foundation, Inc. (for the Bison source code translated in Scheme) Copyright (C) 1996 Dominique Boucher (for the translation in Scheme) lalr.scm and lr-dvr.scm are free software; you can redistribute them and/or modify them under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. lalr.scm and lr-dvr.scm are distributed in the hope that they will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with lalr.scm; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. Dominique Boucher -- Universite de Montreal Send questions, comments or suggestions to boucherd@iro.umontreal.ca bussproofs.sty. Version 0.6c: (c) 1994,1995,1996. Copyright retained by Samuel R. Buss. This software may be used and distributed freely, except that if you make changes, you must change the file name to be different than bussproofs.sty to avoid compatibility problems. This is a *prerelease* version and is subject to change. Please report comments and bugs to sbuss@ucsd.edu. All other files have the following copyright holder and license: Copyright Holder: Helmut Schwichtenberg (schwicht@math.lmu.de) and members of the logic group (logik@math.lmu.de) minlog mail is minlog@mathematik.uni-muenchen.de License: You are free to distribute this software under the terms of the GNU General Public License. On Debian systems, the complete text of the GNU General Public License can be found in the file `/usr/share/common-licenses/GPL'. 
minlog-4.0.99.20080304/debian/control0000644000175000017500000000254511340011070016107 0ustar barralbarralSource: minlog Section: math Priority: optional Maintainer: Freiric Barral Build-Depends: debhelper (>= 4.0.0), mzscheme, texlive (>= 2007-11) Standards-Version: 3.7.2 Package: minlog Architecture: all Depends: mzscheme | guile Recommends: emacs22 | emacs21 | emacsen Suggests: proofgeneral-minlog, quack-el Description: Proof assistant based on first order natural deduction calculus intended to reason about computable functionals, using minimal rather than classical or intuitionistic logic. The main motivation behind MINLOG is to exploit the proofs-as-programs paradigm for program development and program verification. Proofs are in fact treated as first class objects which can be normalized. If a formula is existential then its proof can be used for reading off an instance of it, or changed appropriately for program development by proof transformation. To this end MINLOG is equipped with tools to extract functional programs directly from proof terms. This also applies to non-constructive proofs, using a refined A-translation. The system is supported by automatic proof search and normalization by evaluation as an efficient term rewriting device. . Minlog can be used with ProofGeneral, which allows proofs to be edited using emacs and xemacs. This requires the proofgeneral-minlog package to be installed. minlog-4.0.99.20080304/debian/compat0000644000175000017500000000000211340011070015674 0ustar barralbarral4 minlog-4.0.99.20080304/debian/minlog.10000644000175000017500000000132511340011070016046 0ustar barralbarral.TH MINLOG 1 .SH NAME minlog \- starts Emacs and runs Minlog in it. .SH SYNOPSIS .B minlog files .SH "DESCRIPTION" This manual page documents the .BR minlog program. This manual page was written for the Debian GNU/Linux distribution because the original program does not have a manual page. .PP The .B minlog command starts Emacs (by default, set $EMACS to switch to your Emacs version of choice), loads the minlog-mode and open standard Minlog buffers. .SH OPTIONS .TP .B files One or more scheme files for the Minlog theorem prover. Usually these files end with .B .scm . .SH AUTHOR This manual page was written by Stefan Schimanski , for the Debian GNU/Linux system (but may be used by others). minlog-4.0.99.20080304/modules/0000755000175000017500000000000010763221143014740 5ustar barralbarralminlog-4.0.99.20080304/modules/type-inf.scm0000755000175000017500000003077510747326134017226 0ustar barralbarral; $Id: type-inf.scm,v 1.9 2008/01/28 09:20:12 logik Exp $ ; 14. Hindley's Type Inference Algorithm ; ====================================== ; The type free expressions may be introduced as elements of an ; appropriate free algebra. We can then perform type inference with ; Hindley's algorithm. This may be useful for a simple method to input ; terms, where typability is checked and the most general type is ; inferred (as in ML). ; eqs has the form ((type11 type12) (type21 type22) ...), considered as ; a list of equations. It is assumed that all types are built from type ; variables by arrow and star. type-martmont implements the ; Martelli-Montanari unification algorithm (cf proofth/ss01/pt.tex). ; We may also extract the Martelli-Montanari unification algorithm as ; well as Hindley's type inference algorithm from the corresponding ; proofs. Then we would need to introduce the data structures as free ; algebras. However, for simplicity we do not do this at the moment. 
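; A hedged illustration (comment added here, not part of the original file):
; assuming the type parser py and display-t-substitution are loaded, as in the
; tests further below, the unifier type-martmont defined next can be applied
; to a list of type equations, e.g.
; (display-t-substitution
;  (type-martmont (list (list (py "alpha1=>alpha2") (py "alpha3=>alpha3")))))
; which should yield a most general unifier mapping both alpha1 and alpha2
; to alpha3.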
(define (type-occurs? tvar type) (or (equal? tvar type) (and (arrow-form? type) (or (type-occurs? tvar (arrow-form-to-arg-type type)) (type-occurs? tvar (arrow-form-to-val-type type)))) (and (star-form? type) (or (type-occurs? tvar (star-form-to-left-type type)) (type-occurs? tvar (star-form-to-right-type type)))))) (define (type-martmont eqs) (if (null? eqs) empty-subst (let* ((first (car eqs)) (l (car first)) (r (cadr first))) (if (tvar? l) (if (equal? l r) (type-martmont (cdr eqs)) (if (type-occurs? l r) #f (let ((prev (type-martmont (map (lambda (x) (list (type-subst (car x) l r) (type-subst (cadr x) l r))) (cdr eqs))))) (if prev (compose-t-substitutions (list (list l r)) prev) #f)))) (if (tvar? r) (type-martmont (cons (list r l) (cdr eqs))) (cond ((and (arrow-form? l) (arrow-form? r)) (type-martmont (cons (list (arrow-form-to-arg-type l) (arrow-form-to-arg-type r)) (cons (list (arrow-form-to-val-type l) (arrow-form-to-val-type r)) (cdr eqs))))) ((and (star-form? l) (star-form? r)) (type-martmont (cons (list (star-form-to-left-type l) (star-form-to-left-type r)) (cons (list (star-form-to-right-type l) (star-form-to-right-type r)) (cdr eqs))))) (else #f))))))) ; Complete test: (define (tvar-arrow-star-type? x) (and (type-form? x) (case (tag x) ((tvar) (tvar? x)) ((arrow) (and (tvar-arrow-star-type? (arrow-form-to-arg-type x)) (tvar-arrow-star-type? (arrow-form-to-val-type x)))) ((star) (and (tvar-arrow-star-type? (star-form-to-left-type x)) (tvar-arrow-star-type? (star-form-to-right-type x)))) (else #f)))) (define (type-unify type1 type2) (if (and (tvar-arrow-star-type? type1) (tvar-arrow-star-type? type2)) (type-martmont (list (list type1 type2))) (myerror "type-unify: tvar-arrow-star-types expected" type1 type2))) (define (type-unify-list types1 types2) (type-unify (apply mk-arrow types1) (apply mk-arrow types2))) ; Test (display-t-substitution (type-unify-list (list (py "alpha2=>alpha2") (py "alpha1=>(alpha1=>alpha1)=>alpha2")) (list (py "alpha1") (py "(alpha3=>alpha3)=>alpha4=>alpha3")))) ; alpha1 -> alpha3=>alpha3 ; alpha2 -> alpha3 ; alpha4 -> (alpha3=>alpha3)=>alpha3=>alpha3 ; We implement Hindley's algorithm. Given a type-free lambda-term, the ; algorithm decides whether it admits a typing, and if so, computes a ; principal one. If there is no typing, it returns #f. ; Type free lambda-terms are viewed as Scheme expressions, for instance ; (u15 (lambda (u16) (u16 (lambda (u17) (u15 (lambda (u18) u17)))))) ; Typings are of the form (((var1 chi1) ... (varn chin)) phi). (define (expr-app-form? x) (and (list? x) (= 2 (length x)))) (define expr-app-form-to-op car) (define expr-app-form-to-arg cadr) (define (expr-lambda-form? x) (and (list? x) (= 3 (length x)) (eq? 'lambda (car x)) (list (cadr x)) (= 1 (length (cadr x))))) (define expr-in-abst-form-to-symbol caadr) (define expr-in-abst-form-to-kernel caddr) (define (expr-pair-form? x) (and (list? x) (= 3 (length x)) (eq? 'cons (car x)))) (define expr-pair-form-to-left cadr) (define expr-pair-form-to-right caddr) (define (expr-left-comp-form? x) (and (list? x) (= 2 (length x)) (eq? 'car (car x)))) (define expr-left-comp-form-to-kernel cadr) (define (expr-right-comp-form? x) (and (list? x) (= 2 (length x)) (eq? 'cdr (car x)))) (define expr-right-comp-form-to-kernel cadr) (define (type-inf expr) (cond ((symbol? expr) (let ((typevar (new-tvar))) (list (list (list expr typevar)) typevar))) ((expr-app-form? 
expr) (let ((prev1 (type-inf (expr-app-form-to-op expr))) (prev2 (type-inf (expr-app-form-to-arg expr)))) (if (or (not prev1) (not prev2)) #f (let* ((vars1 (map car (car prev1))) (vars2 (map car (car prev2))) (ws (intersection vars1 vars2)) (sigmas (do ((x (car prev1) (cdr x)) (res '() (if (memq (caar x) ws) (cons (cadar x) res) res))) ((null? x) (reverse res)))) (taus (do ((x (car prev2) (cdr x)) (res '() (if (memq (caar x) ws) (cons (cadar x) res) res))) ((null? x) (reverse res)))) (typevar (new-tvar)) (mgu (type-unify-list (cons (cadr prev1) sigmas) (cons (make-arrow (cadr prev2) typevar) taus)))) (if (not mgu) (begin (display "; Typing impossible. One can still type ") (display (expr-app-form-to-op expr)) (display " with ") (display (type-to-string (cadr prev1))) (if (null? (car prev1)) (newline) (begin (display " from") (newline) (display "; ") (do ((c (car prev1) (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? (cdr c)) (begin (newline) (display "; ")))))) (display "; and ") (display (expr-app-form-to-arg expr)) (display " with ") (display (type-to-string (cadr prev2))) (if (null? (car prev2)) (newline) (begin (display " from") (newline) (display "; ") (do ((c (car prev2) (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? (cdr c)) (begin (newline) (display "; ")))))) (display "; However, unification failed at this stage.") (newline) #f) (list (remove-duplicates (map (lambda (x) (list (car x) (type-substitute (cadr x) mgu))) (append (car prev1) (car prev2)))) (type-substitute typevar mgu))))))) ((expr-lambda-form? expr) (let* ((symbol (expr-in-abst-form-to-symbol expr)) (kernel (expr-in-abst-form-to-kernel expr)) (prev (type-inf kernel))) (if (not prev) #f (let ((info (assq symbol (car prev)))) (if info (list (remove info (car prev)) (make-arrow (cadr info) (cadr prev))) (list (car prev) (make-arrow (new-tvar) (cadr prev)))))))) ((expr-pair-form? expr) (let ((prev1 (type-inf (expr-pair-form-to-left expr))) (prev2 (type-inf (expr-pair-form-to-right expr)))) (if (or (not prev1) (not prev2)) #f (let* ((vars1 (map car (car prev1))) (vars2 (map car (car prev2))) (ws (intersection vars1 vars2)) (sigmas (do ((x (car prev1) (cdr x)) (res '() (if (memq (caar x) ws) (cons (cadar x) res) res))) ((null? x) (reverse res)))) (taus (do ((x (car prev2) (cdr x)) (res '() (if (memq (caar x) ws) (cons (cadar x) res) res))) ((null? x) (reverse res)))) (mgu (type-unify-list sigmas taus))) (if (not mgu) (begin (display "; Typing impossible. One can still type ") (display (expr-pair-form-to-left expr)) (display " with ") (display (type-to-string (cadr prev1))) (if (null? (car prev1)) (newline) (begin (display " from") (newline) (display "; ") (do ((c (car prev1) (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? (cdr c)) (begin (newline) (display "; ")))))) (display "; and ") (display (expr-pair-form-to-right expr)) (display " with ") (display (type-to-string (cadr prev2))) (if (null? (car prev2)) (newline) (begin (display " from") (newline) (display "; ") (do ((c (car prev2) (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? 
(cdr c)) (begin (newline) (display "; ")))))) (display "; However, unification failed at this stage.") (newline) #f) (list (remove-duplicates (map (lambda (x) (list (car x) (type-substitute (cadr x) mgu))) (append (car prev1) (car prev2)))) (make-star (type-substitute (cadr prev1) mgu) (type-substitute (cadr prev2) mgu)))))))) ((expr-left-comp-form? expr) (let* ((kernel (expr-left-comp-form-to-kernel expr)) (prev (type-inf kernel))) (if (not prev) #f (let* ((tvar1 (new-tvar)) (tvar2 (new-tvar)) (mgu (type-unify (cadr prev) (make-star tvar1 tvar2)))) (if (not mgu) (begin (display "; Typing impossible. One can still type ") (display kernel) (display " with ") (display (type-to-string (cadr prev))) (if (null? (car prev)) (newline) (begin (display " from") (newline) (display "; ") (do ((c (car prev) (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? (cdr c)) (begin (newline) (display "; ")))))) (display "; However, unification with tvar1;tvar2") (display " failed at this stage.") (newline) #f) (list (remove-duplicates (map (lambda (x) (list (car x) (type-substitute (cadr x) mgu))) (car prev))) (type-substitute tvar1 mgu))))))) ((expr-right-comp-form? expr) (let* ((kernel (expr-right-comp-form-to-kernel expr)) (prev (type-inf kernel))) (if (not prev) #f (let* ((tvar1 (new-tvar)) (tvar2 (new-tvar)) (mgu (type-unify (cadr prev) (make-star tvar1 tvar2)))) (if (not mgu) (begin (display "; Typing impossible. One can still type ") (display kernel) (display " with ") (display (type-to-string (cadr prev))) (if (null? (car prev)) (newline) (begin (display " from") (newline) (display "; ") (do ((c (car prev) (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? (cdr c)) (begin (newline) (display "; ")))))) (display "; However, unification with tvar1;tvar2") (display " failed at this stage.") (newline) #f) (list (remove-duplicates (map (lambda (x) (list (car x) (type-substitute (cadr x) mgu))) (car prev))) (type-substitute tvar2 mgu))))))) (else (myerror "type-inf: expression expected" expr)))) ; Tests ; (type-inf 'x) ; (type-inf '(x y)) ; (type-inf '(x x)) ; (type-inf '(lambda (x) (y x))) ; (type-inf '(lambda (x) (x x))) ; (type-inf '(lambda (x) (lambda (y) (lambda (z) ((x z) (y z)))))) (define (ti expr) (let ((prev (type-inf expr))) (if prev (let* ((alist (car prev)) (phi (cadr prev))) (display "; A principal typing is") (newline) (display "; ") (display (type-to-string phi)) (if (null? alist) (newline) (begin (display " from") (newline) (display "; ") (do ((c alist (cdr c))) ((null? c) (newline)) (display " ") (display (caar c)) (display ":") (display (type-to-string (cadar c))) (if (pair? (cdr c)) (begin (newline) (display "; "))))))) (myerror "ti: typeable expression expected" expr)))) ; (ti 'x) ; (ti '(lambda (x) (lambda (y) (lambda (z) ((x z) (y z)))))) ; (ti '(u15 (lambda (u16) (u16 (lambda (u17) (u15 (lambda (u18) u17))))))) ; => ; A principal typing is ; p22 from ; u15:(((p19 -> p22) -> p19) -> p19) -> p22 ; 2. (ti '(lambda (u) (lambda (v) (lambda (w) (u (u (v (v w)))))))) ; => ; A principal typing is ; (p25 -> p25) -> (p25 -> p25) -> p25 -> p25 ; This example shows that the formula inferred by type inference can ; be an instance of a more general derivable formula. ; Literature: Hindley 69, and my lecture `Typentheorie' SS92. 
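; A further hedged example (added comment, not among the original tests), in
; the same type-free expression syntax as above:
; (ti '(lambda (f) (lambda (x) (f (f x)))))
; should report a principal typing of the form (p -> p) -> p -> p for some
; type variable p (the concrete variable name depends on new-tvar).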
minlog-4.0.99.20080304/modules/diatup.scm0000644000175000017500000065024110763004435016745 0ustar barralbarral;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; MDH -- 071117 updated for integration with the mainstream MINLOG ;;; Based on the independent variant finalised in 2006 upon PhD defence ;;; This is the unique module for program extraction by Pure, Light or Monotone ;;; Dialectica interpretation. Based on M.-D. Hernest's PhD thesis ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (newline) (display "**********************************************************") (newline) (display "****** Begin loading DIALECTICA Interpretation extraction module ...") (newline) (display "**********************************************************") (newline) (define (reset-contr-count) (set! CIRC-COUNT 0) (comment "*** CIRC-COUNT counter set to ZERO ***") (set! CRLC-COUNT 0) (comment "*** CRLC-COUNT counter set to ZERO ***")) (define init-contr-count (reset-contr-count)) (define (contraction-count) (begin (newline) (display "************************************************") (newline) (display "Number of Non-Computational Contractions was ") (display CIRC-COUNT) (newline) (display "Number of Computationally Relevant Contractions was ") (display CRLC-COUNT) (newline) (display "*************************************************") (newline) )) (define (reset-IndRule-count) (set! IndRuleCOUNTER 0) (comment "*** IndRuleCOUNTER counter set to ZERO ***") (set! IndRlZeCOUNTER 0) (comment "*** IndRlZeCOUNTER counter set to ZERO ***")) (define init-IndRule-count (reset-IndRule-count)) (define (IndRule-count) (begin (newline) (display "************************************************") (newline) (display "Number of Induction Rule IR applications was ") (display IndRuleCOUNTER) (newline) (display "Number of QFR Induction Rules - IRzero - was ") (display IndRlZeCOUNTER) (newline) (display "*************************************************") (newline) )) (define (DIA-reset-counters) (reset-contr-count) (reset-IndRule-count) (set! UNFOLDING-FLAG #f) (comment "*** UNFOLDING-FLAG flag set to FALSE ***") (set! IMP-ELIM-NORMALIZE #t) (comment "*** IMP-ELIM-NORMALIZE flag set to TRUE ***")) (define (DIA-display-counters) (contraction-count) (IndRule-count)) ;; FLAGS (set! UNFOLDING-FLAG #f) (comment "*** UNFOLDING-FLAG flag set to FALSE ***") ;; The "CHECK" flag decides whether any testing should be ;; done at all - it is good to turn it to #t at the first ;; run on some example and let it remain #f for the subsequent runs (define CHECK #f) (comment "*** CHECK flag set to FALSE ***") ;; The "PARANOIA" flag decides whether a DEEP testing should be ;; done concerning the arguments of various procedure below. By ;; default only simple coherence tests are performed. This is ;; attained by setting the "PARANOIA" flag to the value "#f". ;; DEEP testing is more resource-consuming and is triggered by ;; a "#t" value of the "PARANOIA" flag. All tests are RUN-TIME!!! (define PARANOIA #f) (comment "*** PARANOIA flag set to FALSE ***") ;; The "COMENTARIU" flag decides whether a minimal level ;; of messages is active or not. 
(define COMENTARIU #f) (comment "*** COMENTARIU flag set to FALSE ***") (define EXTRACT-VERBOSE #f) (comment "*** EXTRACT-VERBOSE flag set to FALSE ***") (define CONTR-VERBOSE #f) (comment "*** CONTR-VERBOSE flag set to FALSE ***") ;; The "DEBUG" flag decides whether a debug-level of ;; messages is active or not. (define DEBUG #f) (comment "*** DEBUG flag set to FALSE ***") ;; The "DEBUG-IND-RL" flag decides whether a debug-level ;; of messages is active or not in connection with ;; the treatment of the Induction Rule IR. (define DEBUG-IND-RL #f) (comment "*** DEBUG-IND-RL flag set to FALSE ***") ;; The "DEBUG-UGA" flag decides whether a debug-level ;; of messages is active or not in connection with ;; the treatment of User Global Assumptions (define DEBUG-UGA #f) (comment "*** DEBUG-UGA flag set to FALSE ***") ;; The "DEBUG-DIDA" flag decides whether a debug-level ;; of messages is active or not inside the procedure ;; DIA-Data which associates the ;; characteristic term to an avar (define DEBUG-DIDA #f) (comment "*** DEBUG-DIDA flag set to FALSE ***") ;; The "DEBUG-MAVD" flag decides whether a debug-level of ;; messages is active or not inside the procedure ;; DIA-make-avar-to-Data which ;; associate the characteristic term to an avar (define DEBUG-MAVD #f) (comment "*** DEBUG-MAVD flag set to FALSE ***") ;; The "DEBUG-STAR" flag decides whether a debug-level of ;; messages is active or not inside the procedures ;; DIA-star-to-tmtuple, DIA-star-to-left and DIA-star-to-right (define DEBUG-STAR #f) (comment "*** DEBUG-STAR flag set to FALSE ***") ;; The "DEBUG-NORMALIZE" flag decides whether a debug-level of ;; messages is active or not in connection with ;; nbe-normalize-vatmpair. (define DEBUG-NORMALIZE #f) (comment "*** DEBUG-NORMALIZE flag set to FALSE ***") ;; The "NORMALIZE-PROOF" flag decides whether a normalization ;; of the proof at input should be performed before beginning ;; the program-extraction process. (define NORMALIZE-PROOF #f) (comment "*** NORMALIZE-PROOF flag set to FALSE ***") ;; The "NORMALIZE-TERMS" flag decides whether a normalization of the ;; extracted terms should be performed during the extraction ;; process or not. (define NORMALIZE-TERMS #f) (comment "*** NORMALIZE-TERMS flag set to FALSE ***") ;; The "THEOREM-NORMALIZE" flag decides whether a normalization of the ;; terms extracted from proofs of theorems should be performed (define THEOREM-NORMALIZE #f) (comment "*** THEOREM-NORMALIZE flag set to FALSE ***") (define ALL-NORMALIZE #f) (comment "*** ALL-NORMALIZE flag set to FALSE ***") (define AND-NORMALIZE #f) (comment "*** AND-NORMALIZE flag set to FALSE ***") (define IND-RL-NORMALIZE #f) (comment "*** IND-RL-NORMALIZE flag set to FALSE ***") (define CONDN-NORMALIZE #f) (comment "*** CONDN-NORMALIZE flag set to FALSE ***") (define IMP-INTRO-NORMALIZE #f) (comment "*** IMP-INTRO-NORMALIZE flag set to FALSE ***") (define IMP-ARG-NORMALIZE #f) (comment "*** IMP-ARG-NORMALIZE flag set to FALSE ***") (define IMP-ELIM-NORMALIZE #t) (comment "*** IMP-ELIM-NORMALIZE flag set to TRUE ***") (define (DIA-time proc) (if EXTRACT-VERBOSE (time proc) proc)) (define (NULL-vatuple? vatup) (if (not-vatuple? vatup) (myerror "NULL-vatuple?: " "vatuple argument expected" vatup) (if (null? (cdr vatup)) #t (if (null? (cddr vatup)) #f (and (NULL-vatuple? (cadr vatup)) (NULL-vatuple? (cddr vatup))))))) (define (NULL-tmtuple? tmtup) (if (not-tmtuple? tmtup) (myerror "NULL-tmtuple?: " "tmtuple argument expected" tmtup) (if (null? (cdr tmtup)) #t (if (null? 
(cddr tmtup)) #f (and (NULL-tmtuple? (cadr tmtup)) (NULL-tmtuple? (cddr tmtup))))))) (define (DIA-set-minus lst tmtupalst) (do ((l tmtupalst (cdr l)) (res lst (set-minus res (formula-to-free (avar-to-formula (caar l)))))) ((null? l) res))) (define (set-flag FLG VAL) (begin (if VAL (if (not (eq? VAL #t)) (myerror "set-flag:" "Only #t and #f are allowed as values here" "You gave me a value equivalent to #t." "Please give me THE value #t if that's" "what you really want!!!")) (if (not (eq? VAL #f)) (myerror "set-flag:" "Only #t and #f are allowed as values here" "You gave me a value equivalent to #f." "Please give me THE value #f if that's" "what you really want!!!"))) (case FLG ((CHECK) (begin (set! CHECK VAL) (comment "*** CHECK flag set to " VAL " ***") (if (not VAL) (begin (set! PARANOIA VAL) (comment "*** PARANOIA flag set to " VAL " ***") (set! DEBUG VAL) (comment "*** DEBUG flag set to " VAL " ***"))))) ((PARANOIA) (begin (set! PARANOIA VAL) (comment "*** PARANOIA flag set to " VAL " ***") (if VAL (begin (set! CHECK VAL) (comment "*** CHECK flag set to " VAL " ***")) (begin (set! DEBUG VAL) (comment "*** DEBUG flag set to " VAL " ***"))))) ((DEBUG) (begin (set! DEBUG VAL) (comment "*** DEBUG flag set to " VAL " ***") (if VAL (begin (set! PARANOIA VAL) (comment "*** PARANOIA flag set to " VAL " ***") (set! CHECK VAL) (comment "*** CHECK flag set to " VAL " ***"))))) ((NORMALIZE-TERMS) (begin (set! NORMALIZE-TERMS VAL) (comment "*** NORMALIZE-TERMS flag set to " VAL " ***") (if VAL (begin (set! UNFOLDING-FLAG #f) (comment "*** UNFOLDING-FLAG flag" "set to FALSE ***"))))) ((UNFOLDING-FLAG) (begin (set! UNFOLDING-FLAG VAL) (comment "*** UNFOLDING-FLAG flag set to " VAL " ***"))) (else (myerror "set-flag: please use one of the following FLAGS:" "'CHECK or 'PARANOIA or 'DEBUG or 'NORMALIZE-TERMS" "or 'UNFOLDING-FLAG"))))) (define (aconst-to-repro-formula1 x) (cadr (cddddr x))) (define (aconst-to-repro-formula2 x) (caddr (cddddr x))) (define FALSE_tm (make-term-in-const-form false-const)) (define TRUE_tm (make-term-in-const-form true-const)) (define NULL_typ (py "nulltype")) (define (NULL-typ? ty) (equal? NULL_typ ty)) (add-program-constant "NULL" NULL_typ) (define NULL_tm (pt "NULL")) (define (NULL-tm? tm) (equal? NULL_tm tm)) (define (not-avar? x) (if (avar? x) #f #t)) (define (not-alg-form? x) (if (alg-form? x) #f #t)) (define (not-list? x) (if (list? x) #f #t)) (define (not-null? x) (if (null? x) #f #t)) (define (not-eq? x y) (if (eq? x y) #f #t)) (define (not-pair? x) (if (pair? x) #f #t)) (define (not-type? x) (if (type? x) #f #t)) (define (not-type-form? x) (if (type-form? x) #f #t)) (define (not-var? x) (if (var? x) #f #t)) (define (nvar-form? x) (if (var-form? x) #f #t)) (define (not-term? x) (if (term? x) #f #t)) (define term-DEEP? term?) (define (not-term-DEEP? x) (if (term-DEEP? x) #f #t)) (define (DIA-formula? x) (if CHECK (formula? x) #t)) (define (not-DIA-formula? x) (if (DIA-formula? x) #f #t)) (define (DIA-type? x) (if CHECK (if PARANOIA (type? x) (type-form? x)) #t)) (define (not-DIA-type? x) (if (DIA-type? x) #f #t)) (define (DIA-var? x) (if CHECK (if PARANOIA (var? x) (var-form? x)) #t)) (define (not-DIA-var? x) (if (DIA-var? x) #f #t)) (define (DIA-term? x) (if CHECK (if PARANOIA (term-DEEP? x) (term? x)) #t)) (define (not-DIA-term? x) (if (DIA-term? x) #f #t)) (define (term-to-zero tm) (type-to-zero (term-to-type tm))) (define (var-to-zero va) (type-to-zero (var-to-type va))) (define DIA-ZERO-LIST '()) (define (type-to-zero typ) (if (ground-type? 
typ) (type-to-canonical-inhabitant typ) (let((info (assoc-wrt equal? typ DIA-ZERO-LIST))) (if info (cadr info) (let((new-zero (type-to-zero-aux typ))) (begin (set! DIA-ZERO-LIST (cons (list typ new-zero) DIA-ZERO-LIST)) new-zero)))))) (define (type-to-zero-aux typ) (if (arrow-form? typ) (make-term-in-abst-form (type-to-new-var (arrow-form-to-arg-type typ)) (type-to-zero (arrow-form-to-val-type typ))) (if (star-form? typ) (make-term-in-pair-form (type-to-zero (star-form-to-left-type typ)) (type-to-zero (star-form-to-right-type typ))) (type-to-canonical-inhabitant typ)))) (define (DIA-type-to-string typ) (if CHECK (if PARANOIA (DIA-type-to-string-aux typ) (if (not-type? typ) (myerror "DIA-type-to-string:" "type argument expected" typ) (DIA-type-to-string-aux typ))) (DIA-type-to-string-aux typ))) (define DIA-type-to-string-aux type-to-string) (define (DIA-var-to-string va) (if CHECK (if PARANOIA (var-to-string va) (if (not-var? va) (myerror "DIA-var-to-string:" "variable argument expected" va) (var-to-string va))) (var-to-string va))) (define (DIA-term-to-string tm) (if CHECK (if PARANOIA (term-to-string tm) (if (not-term-DEEP? tm) (myerror "DIA-term-to-string:" "term argument expected" tm) (term-to-string tm))) (term-to-string tm))) ;; DISPLAY PROCEDURES (define SNL (string #\newline)) (define SBK (string #\backspace)) (define SBK2 (string-append SBK SBK)) (define SBK3 (string-append SBK SBK2)) (define (DIA-comment . x) (if COMENTARIU (if (not-null? x) (begin (newline) (display "; ") (do ((l x (cdr l))) ((null? l) (newline)) (case (car l) ((CNL) (newline) (display "; ")) (else (display " ") (display (car l))))))))) (define (DIA-comment-forced . x) (if #t (if (not-null? x) (begin (newline) (display "; ") (do ((l x (cdr l))) ((null? l) (newline)) (case (car l) ((CNL) (newline) (display "; ")) (else (display " ") (display (car l))))))))) (define (nldisplay . strings) (newline) (display strings) (newline)) (define (normalize-term-to-string term) (DIA-term-to-string (nbe-normalize-term term))) (define (tytuple-to-string tytup) (if (not-tytuple? tytup) (myerror "tytuple-to-string:" "tytuple argument expected" tytup) (if (null? (cdr tytup)) "ETY" (if (null? (cddr tytup)) (DIA-type-to-string (cadr tytup)) (string-append "{" (tytuple-to-string (cadr tytup)) "," (tytuple-to-string (cddr tytup)) "}"))))) (define (vatuple-to-string vatup) (if (not-vatuple? vatup) (myerror "vatuple-to-string:" "vatuple argument expected" vatup) (if (null? (cdr vatup)) "EVA" (if (null? (cddr vatup)) (DIA-var-to-string (cadr vatup)) (string-append "{" (vatuple-to-string (cadr vatup)) "," (vatuple-to-string (cddr vatup)) "}"))))) (define (normalize-tmtuple-to-string tmtpl) (tmtuple-to-string (nbe-normalize-tmtuple tmtpl))) (define (tmtuple-to-string tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-to-string:" "tmtuple argument expected" tmtup) (if (null? (cdr tmtup)) "ETM" (if (null? (cddr tmtup)) (DIA-term-to-string (cadr tmtup)) (string-append "{" (tmtuple-to-string (cadr tmtup)) "," (tmtuple-to-string (cddr tmtup)) "}"))))) (define (tmtuplist-to-string tmtuplst) (string-append "BEGIN" SNL "FIRST-TMTUPLE = " (tmtuplist-to-string-aux tmtuplst))) (define (tmtuplist-to-string-aux tmtuplst) (if (null? tmtuplst) (string-append SNL "END") (let((car_tmtuplst (car tmtuplst)) (cdr_tmtuplst (cdr tmtuplst))) (if (null? 
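;; type-to-zero caches one canonical "zero" inhabitant per composite type
;; in DIA-ZERO-LIST: ground types get their canonical inhabitant directly,
;; arrow types a constant function and star types a pair of zeros.
;; Sketch (the printed variable name and zero may differ):
;;   (term-to-string (type-to-zero (py "nat=>nat")))   ; e.g. "[n]0"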
cdr_tmtuplst) (string-append (tmtuple-to-string car_tmtuplst) SNL "END") (string-append (tmtuple-to-string car_tmtuplst) SNL "NEXT-TMTUPLE = " (tmtuplist-to-string-aux cdr_tmtuplst)))))) (define (normalize-tmtuplist-to-string tmtuplst) (string-append "BEGIN" SNL "FIRST-TMTUPLE = " (normalize-tmtuplist-to-string-aux tmtuplst))) (define (normalize-tmtuplist-to-string-aux tmtuplst) (if (null? tmtuplst) (string-append SNL "END") (let ((car_tmtuplst (car tmtuplst)) (cdr_tmtuplst (cdr tmtuplst))) (if (null? cdr_tmtuplst) (string-append (normalize-tmtuple-to-string car_tmtuplst) SNL "END") (string-append (normalize-tmtuple-to-string car_tmtuplst) SNL "NEXT-TMTUPLE = " (normalize-tmtuplist-to-string-aux cdr_tmtuplst)))))) (define (nbe-normalize-tmtuplist tmtuplst) (if (null? tmtuplst) tmtuplst (let((car_tmtuplst (car tmtuplst)) (cdr_tmtuplst (cdr tmtuplst))) (if (null? cdr_tmtuplst) (list (nbe-normalize-tmtuple car_tmtuplst)) (cons (nbe-normalize-tmtuple car_tmtuplst) (nbe-normalize-tmtuplist cdr_tmtuplst)))))) (define (DIA-avars-to-string avar-lst) (string-append "BEGIN" SNL "FIRST-AVAR = " (DIA-avars-to-string-aux avar-lst))) (define (DIA-avars-to-string-aux avar-lst) (if (null? avar-lst) (string-append SNL "END") (string-append (avar-to-string (car avar-lst)) " of formula " SNL (formula-to-string (avar-to-formula (car avar-lst))) SNL "NEXT-AVAR = " (DIA-avars-to-string-aux (cdr avar-lst))))) (define (tmtuplealist-to-string tmtplalst) (if (not (tmtuplealist? tmtplalst)) (myerror "tmtuplealist-to-string:" "argument must be tmtuplealist") (string-append "BEGIN" SNL "FIRST-ASSOCIATION = " SNL (tmtuplealist-to-string-aux tmtplalst)))) (define (tmtuplealist-to-string-aux tmtplalst) (if (null? tmtplalst) (string-append SNL "END") (string-append "ASSOC-FORMULA = " (formula-to-string (avar-to-formula (caar tmtplalst))) SNL "ASSOC-TMTUPLE= " (tmtuple-to-string (cdar tmtplalst)) SNL "NEXT-ASSOCIATION = " SNL (tmtuplealist-to-string-aux (cdr tmtplalst))))) (define (types-tmtuplealist-to-string tmtplalst) (if (ntmtuplealist? tmtplalst) (myerror "types-tmtuplealist-to-string:" "argument must be tmtuplealist") (string-append "BEGIN" SNL "FIRST-ASSOCIATION = " SNL (types-tmtuplealist-to-string-aux tmtplalst)))) (define (types-tmtuplealist-to-string-aux tmtplalst) (if (null? tmtplalst) (string-append SNL "END") (string-append "ASSOC-FORMULA = " (formula-to-string (avar-to-formula (caar tmtplalst))) SNL "ASSOC-TMTUPLE= " (tytuple-to-string (tmtuple-to-tytuple (cdar tmtplalst))) SNL "NEXT-ASSOCIATION = " SNL (types-tmtuplealist-to-string-aux (cdr tmtplalst))))) (define (normalize-tmtuplealist-to-string tmtplalst) (if (ntmtuplealist? tmtplalst) (myerror "normalize-tmtuplealist-to-string:" "argument must be tmtuplealist") (string-append "BEGIN" SNL "FIRST-ASSOCIATION = " (normalize-tmtuplealist-to-string-aux tmtplalst)))) (define (normalize-tmtuplealist-to-string-aux tmtplalst) (if (null? tmtplalst) "END" (string-append "ASSOC-FORMULA = " (formula-to-string (avar-to-formula (caar tmtplalst))) SNL "ASSOC-TMTUPLE= " (normalize-tmtuple-to-string (cdar tmtplalst)) SNL "NEXT-ASSOCIATION = " (normalize-tmtuplealist-to-string-aux (cdr tmtplalst))))) (define (nbe-normalize-tmtuplealist tmtplalst) (if (null? 
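;; Printing conventions of the debugging output above and below: an empty
;; tuple prints as "ETY" / "EVA" / "ETM" (type-, var- and term-tuples
;; respectively), a singleton prints as its single element, an inner node
;; as "{<left>,<right>}", and the alist printers wrap their entries in
;; BEGIN ... NEXT-... ... END blocks.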
tmtplalst) tmtplalst (cons (cons (caar tmtplalst) (nbe-normalize-tmtuple (cdar tmtplalst))) (nbe-normalize-tmtuplealist (cdr tmtplalst))))) (define (typair-to-string typair) (string-append "BEGIN" SNL "TYPAIR-LEFT = " (tytuple-to-string (typair-left typair)) SNL "TYPAIR-RIGHT = " (tytuple-to-string (typair-right typair)) SNL "END")) (define (vapair-to-string vapr) (string-append "BEGIN" SNL "VAPAIR-LEFT = " (vatuple-to-string (vapair-left vapr)) SNL "VAPAIR-RIGHT = " (vatuple-to-string (vapair-right vapr)) SNL "END")) (define (tmpair-to-string tmpair) (string-append "BEGIN" SNL "TMTUPLE = " (tmtuple-to-string (tmpair-to-tuple tmpair)) SNL "ALIST = " (tmtuplealist-to-string (tmpair-to-alist tmpair)) SNL "END")) (define (types-tmpair-to-string tmpair) (string-append "BEGIN" SNL "TMTUPLE = " (tytuple-to-string (tmtuple-to-tytuple (tmpair-to-tuple tmpair))) SNL "ALIST = " (types-tmtuplealist-to-string (tmpair-to-alist tmpair)) SNL "END")) (define (normalize-tmpair-to-string tmpair) (string-append "BEGIN" SNL "TMTUPLE = " (normalize-tmtuple-to-string (tmpair-to-tuple tmpair)) SNL "ALIST = " (normalize-tmtuplealist-to-string (tmpair-to-alist tmpair)) SNL "END")) (define (nbe-normalize-tmpair tmpair) (make-tmpair (nbe-normalize-tmtuple (tmpair-to-tuple tmpair)) (nbe-normalize-tmtuplealist (tmpair-to-alist tmpair)))) (define (vatmpair-to-string vatmpr) (string-append SNL "BEGIN" SNL (vapair-to-string (vatmpair-to-vapair vatmpr)) SNL (tmpair-to-string (vatmpair-to-tmpair vatmpr)) "END")) (define (normalize-vatmpair-to-string vatmpr) (string-append SNL "BEGIN" SNL (vapair-to-string (vatmpair-to-vapair vatmpr)) SNL (normalize-tmpair-to-string (vatmpair-to-tmpair vatmpr)) "END" )) (define (nbe-normalize-vatmpair-of rule vatmpr) (let((new-vatmpr (if rule (make-vatmpair (vatmpair-to-vapair vatmpr) (DIA-time (nbe-normalize-tmpair (vatmpair-to-tmpair vatmpr)))) vatmpr))) (begin (if DEBUG-NORMALIZE (nldisplay "nbe-normalize-vatmpair-of:" SNL (vatmpair-to-string new-vatmpr))) new-vatmpr))) (define (nbe-normalize-vatmpair vatmpr) (let((new-vatmpr (if NORMALIZE-TERMS (make-vatmpair (vatmpair-to-vapair vatmpr) (nbe-normalize-tmpair (vatmpair-to-tmpair vatmpr))) vatmpr))) (begin (if DEBUG-NORMALIZE (nldisplay "nbe-normalize-vatmpair:" SNL (vatmpair-to-string new-vatmpr))) new-vatmpr))) (define (types-vatmpair-to-string vatmpr) (string-append SNL "BEGIN" SNL (vapair-to-string (vatmpair-to-vapair vatmpr)) SNL (types-tmpair-to-string (vatmpair-to-tmpair vatmpr)) "END")) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; DATATYPE tylist == a simple list of types ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (tylist? lst) (if (not-list? lst) #f (tylist?-aux lst))) (define (tylist?-aux lst) (if (null? lst) #t (if (DIA-type? (car lst)) (tylist?-aux (cdr lst)) #f))) (define (not-tylist? lst) (if (not-list? lst) #t (not-tylist?-aux lst))) (define (not-tylist?-aux lst) (if (null? lst) #f (if (not-DIA-type? (car lst)) #t (not-tylist?-aux (cdr lst))))) (define (tylist-to-string tylst) (begin (if PARANOIA (if (not-tylist? tylst) (myerror "tylist-to-string:" "tylist argument expected" tylst))) (if (null? tylst) "<| EMPTY-TYLIST |>" (string-append "<| " (tylist-to-string-aux tylst) " |>")))) (define (tylist-to-string-aux tylst) (if (null? tylst) SBK3 (string-append (type-to-string (car tylst)) " , " (tylist-to-string-aux (cdr tylst))))) (define (tytuple-to-tylist tytup) (if (not-tytuple? 
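;; nbe-normalize-vatmpair (above) only normalizes its term part when the
;; NORMALIZE-TERMS flag is set; nbe-normalize-vatmpair-of does so when its
;; first argument is true.  A "tylist", in contrast to the tytuple trees
;; below, is just a flat Scheme list of types, e.g.
;;   (tylist? (list (py "nat") (py "boole")))   ; => #t
;; and the tylist-minus-* procedures remove one occurrence of each given
;; type, with #f used to signal a type that could not be removed.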
tytup) (myerror "tytuple-to-tylist:" "tytuple argument expected" tytup) (tytuple-to-tylist-aux tytup))) (define (tytuple-to-tylist-aux tytup) (if (null? (cdr tytup)) '() (if (null? (cddr tytup)) (list (cadr tytup)) (append (tytuple-to-tylist-aux (cadr tytup)) (tytuple-to-tylist-aux (cddr tytup)))))) (define (tylist-minus-tytuple tylst tytup) (begin (if PARANOIA (if (not-tylist? tylst) (myerror "tylist-minus-tytuple:" "1st argument must be tylist " tylst))) (if PARANOIA (if (not-tytuple? tytup) (myerror "tylist-minus-tytuple:" "2nd argument must be tytuple " tytup))) (tylist-minus-tylist tylst (tytuple-to-tylist tytup)))) (define (tylist-minus-tylist tylstOP tylstARG) (cond ((Eq? tylstOP #f) #f) ((null? tylstARG) tylstOP) (else (tylist-minus-tylist (tylist-minus-type tylstOP (car tylstARG)) (cdr tylstARG))))) (define (tylist-minus-type tylst typ) (cond ((null? tylst) #f) ((Equal? (car tylst) typ) (cdr tylst)) (else (cons (car tylst) (tylist-minus-type (cdr tylst) typ))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; DATATYPE valist == a simple list of vars ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (valist? lst) (if (not-list? lst) #f (valist?-aux lst))) (define (valist?-aux lst) (if (null? lst) #t (if (DIA-var? (car lst)) (valist?-aux (cdr lst)) #f))) (define (not-valist? lst) (if (not-list? lst) #t (not-valist?-aux lst))) (define (not-valist?-aux lst) (if (null? lst) #f (if (not-DIA-var? (car lst)) #t (not-valist?-aux (cdr lst))))) (define (valist-to-string valst) (begin (if PARANOIA (if (not-valist? valst) (myerror "valist-to-string:" "valist argument expected" valst))) (if (null? valst) "<| EMPTY-VALIST |>" (string-append "<| " (valist-to-string-aux valst) " |>")))) (define (valist-to-string-aux valst) (if (null? valst) SBK3 (string-append (var-to-string (car valst)) " , " (valist-to-string-aux (cdr valst))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; DATATYPE tmlist == a simple list of terms ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (tmlist? lst) (if (not-list? lst) #f (tmlist?-aux lst))) (define (tmlist?-aux lst) (if (null? lst) #t (if (DIA-term? (car lst)) (tmlist?-aux (cdr lst)) #f))) (define (not-tmlist? lst) (if (not-list? lst) #t (not-tmlist?-aux lst))) (define (not-tmlist?-aux lst) (if (null? lst) #f (if (not-DIA-term? (car lst)) #t (not-tmlist?-aux (cdr lst))))) (define (tmtuple-to-tmlist tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-to-tmlist:" "tmtuple argument expected" tmtup) (tmtuple-to-tmlist-aux tmtup))) (define (tmtuple-to-tmlist-aux tmtup) (if (null? (cdr tmtup)) '() (if (null? (cddr tmtup)) (list (cadr tmtup)) (append (tmtuple-to-tmlist-aux (cadr tmtup)) (tmtuple-to-tmlist-aux (cddr tmtup)))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; DATATYPE tytuple -- a binary tree with nodes labeled by types ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define NULL_tytup (cons 'tytuple '())) (define (type-to-tytuple ty) (if (not-DIA-type? ty) (myerror "type-to-tytuple:" "type argument expected" ty) (cons 'tytuple (cons ty '())))) (define (tytupleSIMP? tytup) (if (null? tytup) #f (if (not-pair? tytup) #f (if (not-eq? (car tytup) 'tytuple) #f (if (null? (cdr tytup)) #t (if (not-pair? (cdr tytup)) #f (if (null? (cddr tytup)) (type-form? (cadr tytup)) (and (tytupleSIMP? (cadr tytup)) (tytupleSIMP? (cddr tytup)))))))))) (define (ntytupleSIMP? tytup) (if (tytupleSIMP? 
tytup) #f #t)) (define (tytupleDEEP? tytup) (if (null? tytup) #f (if (not-pair? tytup) #f (if (not-eq? (car tytup) 'tytuple) #f (if (null? (cdr tytup)) #t (if (not-pair? (cdr tytup)) #f (if (null? (cddr tytup)) (type? (cadr tytup)) (and (tytupleDEEP? (cadr tytup)) (tytupleDEEP? (cddr tytup)))))))))) (define (ntytupleDEEP? tytup) (if (tytupleDEEP? tytup) #f #t)) (define (tytuple? tytup) (if CHECK (if PARANOIA (tytupleDEEP? tytup) (tytupleSIMP? tytup)) #t)) (define (not-tytuple? tytup) (if (tytuple? tytup) #f #t)) (define (tytuple-assoc tytup) (if (not-tytuple? tytup) (myerror "tytuple-assoc:" "argument must be a tytuple" tytup) (if (null? (cdr tytup)) (myerror "tytuple-assoc:" "argument cannot be NULL_tytup") (if (null? (cddr tytup)) (myerror "tytuple-assoc: argument" "cannot be SINGLETON tytuple") (let((pair (cadr tytup)) (C (cddr tytup))) (if (null? (cdr pair)) (myerror "tytuple-assoc: left of argument" "cannot be NULL_tytup") (if (null? (cddr pair)) (myerror "tytuple-assoc: left of argument" "cannot be SINGLETON tytuple") (let((A (cadr pair)) (B (cddr pair))) (tytuple-append A (tytuple-append B C "tytuple-assoc 1") "tytuple-assoc 2"))))))))) (define (NULL-tytup? tytup) (if (not-tytuple? tytup) (myerror "NULL-tytup?:" "tytuple argument expected" tytup) (NULL-tytup?-aux tytup))) (define (NULL-tytup?-aux tytup) (if (null? (cdr tytup)) #t (if (null? (cddr tytup)) (nulltype? (cadr tytup)) (and (NULL-tytup?-aux (cadr tytup)) (NULL-tytup?-aux (cddr tytup)))))) (define (tytuple-left tytup) (if (not-tytuple? tytup) (myerror "tytuple-left:" "argument must be a tytuple" tytup) (if (null? (cdr tytup)) (myerror "tytuple-left: argument" "cannot be NULL_tytup" tytup) (if (null? (cddr tytup)) (myerror "tytuple-left: argument" "cannot be singleton tytuple" tytup) (cadr tytup))))) (define (tytuple-right tytup) (if (not-tytuple? tytup) (myerror "tytuple-right: argument" "must be a tytuple" tytup) (if (null? (cdr tytup)) (myerror "tytuple-right: argument" "cannot be NULL_tytup" tytup) (if (null? (cddr tytup)) (myerror "tytuple-right: argument" "cannot be singleton tytuple" tytup) (cddr tytup))))) (define (tytuple-append ty1 ty2 err) (if (not-tytuple? ty1) (myerror "tytuple-append: first" "argument must be a tytuple" ty1 ty2 err) (if (not-tytuple? ty2) (myerror "tytuple-append: second" "argument must be a tytuple" ty1 ty2 err) (tytuple-append-SIMP ty1 ty2)))) (define (tytuple-append-SIMP ty1 ty2) (cons 'tytuple (cons ty1 ty2))) ; (define (tytuple-append-SIMP ty1 ty2) ; (if (null? (cdr ty1)) ty2 ; (if (null? (cdr ty2)) ty1 ; (cons 'tytuple (cons ty1 ty2))))) (define (make-tytuple-arrow ty1 ty2) (if (not-tytuple? ty1) (myerror "make-tytuple-arrow: first" "argument must be a tytuple" ty1 ty2) (if (not-tytuple? ty2) (myerror "make-tytuple-arrow: second" "argument must be a tytuple" ty1 ty2) (make-tytuple-arrow-aux ty1 ty2)))) (define (make-tytuple-arrow-aux ty1 ty2) (if (null? (cdr ty2)) NULL_tytup (if (null? (cddr ty2)) (type-to-tytuple (make-tytuple-arrow-aux-aux ty1 (cadr ty2))) (tytuple-append (make-tytuple-arrow-aux ty1 (cadr ty2)) (make-tytuple-arrow-aux ty1 (cddr ty2)) "make-tytuple-arrow" )))) (define (make-tytuple-arrow-aux-aux tytup typ) (if (null? (cdr tytup)) typ (if (null? (cddr tytup)) (make-arrow (cadr tytup) typ) (make-tytuple-arrow-aux-aux (cadr tytup) (make-tytuple-arrow-aux-aux (cddr tytup) typ))))) (define (tytuple_Eq? ty1 ty2) (if (not-tytuple? ty1) (begin (nldisplay "tytuple_Eq?:" "first argument is not a tytuple" ty1 ty2) #f) (if (not-tytuple? 
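;; make-tytuple-arrow distributes an arrow over the components of its
;; second argument: each leaf type v of ty2 becomes the curried arrow
;; a1=>...=>ak=>v, where a1,...,ak are the leaves of ty1 in order.
;; Sketch with hypothetical leaf types:
;;   ty1 = {nat,boole}, ty2 = {alpha1,alpha2}
;;   ==>  { nat=>boole=>alpha1 , nat=>boole=>alpha2 }
;; tytuple-assoc merely reassociates ((A,B),C) into (A,(B,C)).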
ty2) (begin (nldisplay "tytuple_Eq?:" "second argument is not a tytuple" (tytuple-to-string ty1) ty2) #f) (tytuple_Eq?-aux ty1 ty2)))) (define (tytuple_Eq?-aux ty1 ty2) (if (null? (cdr ty1)) (null? (cdr ty2)) (if (null? (cdr ty2)) #f (if (null? (cddr ty1)) (if (not-null? (cddr ty2)) #f (equal? (cadr ty1) (cadr ty2))) (if (null? (cddr ty2)) #f (and (tytuple_Eq?-aux (cadr ty1) (cadr ty2)) (tytuple_Eq?-aux (cddr ty1) (cddr ty2)))))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; DATATYPE vatuple -- a binary tree with nodes labeled by variables ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define NULL_vatup (cons 'vatuple '())) (define (var-to-vatuple va) (if (not-DIA-var? va) (myerror "var-to-vatuple:" "variable argument expected" va) (cons 'vatuple (cons va '())))) (define (vatupleSIMP? vatup) (if (null? vatup) #f (if (not-pair? vatup) #f (if (not-eq? (car vatup) 'vatuple) #f (if (null? (cdr vatup)) #t (if (not-pair? (cdr vatup)) #f (if (null? (cddr vatup)) (var-form? (cadr vatup)) (and (vatupleSIMP? (cadr vatup)) (vatupleSIMP? (cddr vatup)))))))))) (define (nvatupleSIMP? vatup) (if (vatupleSIMP? vatup) #f #t)) (define (vatupleDEEP? vatup) (if (null? vatup) #f (if (not-pair? vatup) #f (if (not-eq? (car vatup) 'vatuple) #f (if (null? (cdr vatup)) #t (if (not-pair? (cdr vatup)) #f (if (null? (cddr vatup)) (var? (cadr vatup)) (and (vatupleDEEP? (cadr vatup)) (vatupleDEEP? (cddr vatup)))))))))) (define (nvatupleDEEP? vatup) (if (vatupleDEEP? vatup) #f #t)) (define (vatuple? vatup) (if CHECK (if PARANOIA (vatupleDEEP? vatup) (vatupleSIMP? vatup)) #t)) (define (not-vatuple? vatup) (if (vatuple? vatup) #f #t)) (define (vatuple-assoc vatup) (if (not-vatuple? vatup) (myerror "vatuple-assoc:" "argument must be a vatuple" vatup) (if (null? (cdr vatup)) (myerror "vatuple-assoc: argument" "cannot be NULL_vatup") (if (null? (cddr vatup)) (myerror "vatuple-assoc: argument" "cannot be SINGLETON vatuple") (let((pair (cadr vatup)) (C (cddr vatup))) (if (null? (cdr pair)) (myerror "vatuple-assoc: left of" "argument cannot be NULL_vatup") (if (null? (cddr pair)) (myerror "vatuple-assoc: left of argument" "cannot be SINGLETON vatuple") (let((A (cadr pair)) (B (cddr pair))) (vatuple-append A (vatuple-append B C "vatuple-assoc 1") "vatuple-assoc 2"))))))))) (define (vatuple-left vatup) (if (not-vatuple? vatup) (myerror "vatuple-left: argument" "must be a vatuple" vatup) (if (null? (cdr vatup)) (myerror "vatuple-left: argument" "cannot be NULL_vatup" vatup) (if (null? (cddr vatup)) (myerror "vatuple-left: argument" "cannot be singleton vatuple" vatup) (cadr vatup))))) (define (vatuple-right vatup) (if (not-vatuple? vatup) (myerror "vatuple-right: argument" "must be a vatuple" vatup) (if (null? (cdr vatup)) (myerror "vatuple-right: argument" "cannot be NULL_vatup" vatup) (if (null? (cddr vatup)) (myerror "vatuple-right: argument" "cannot be singleton vatuple" vatup) (cddr vatup))))) (define (vatuple-append va1 va2 err) (if (not-vatuple? va1) (myerror "vatuple-append: first" "argument must be a vatuple" va1 va2 err) (if (not-vatuple? va2) (myerror "vatuple-append: second" "argument must be a vatuple" va1 va2 err) (vatuple-append-SIMP va1 va2)))) (define (vatuple-append-SIMP va1 va2) (cons 'vatuple (cons va1 va2))) ; (define (vatuple-append-SIMP va1 va2) ; (if (null? (cdr va1)) va2 ; (if (null? (cdr va2)) va1 ; (cons 'vatuple (cons va1 va2))))) (define (vatuple-len vatup) (if (not-vatuple? 
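;; vatuples mirror tytuples exactly (tag 'vatuple, variables at the
;; leaves).  tytuple-to-vatuple introduces a fresh variable
;; (type-to-new-var) for every leaf type, and vatuple-to-tytuple goes back
;; via var-to-type, so the round trip preserves the tree shape and the
;; types but not the variable names; vatuple-len counts the leaves.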
vatup) (myerror "vatuple-len:" "vatuple argument expected" vatup) (vatuple-len-aux vatup))) (define (vatuple-len-aux vatup) (if (null? (cdr vatup)) 0 (if (null? (cddr vatup)) 1 (+ (vatuple-len-aux (cadr vatup)) (vatuple-len-aux (cddr vatup)))))) (define (tytuple-to-vatuple tytup) (if (not-tytuple? tytup) (myerror "tytuple-to-vatuple:" "tytuple argument expected" tytup) (tytuple-to-vatuple-aux tytup))) (define (tytuple-to-vatuple-aux tytup) (if (null? (cdr tytup)) NULL_vatup (if (null? (cddr tytup)) (var-to-vatuple (type-to-new-var (cadr tytup))) (vatuple-append (tytuple-to-vatuple-aux (cadr tytup)) (tytuple-to-vatuple-aux (cddr tytup)) "tytuple-to-vatuple" )))) (define (vatuple-to-tytuple vatup) (if (not-vatuple? vatup) (myerror "vatuple-to-tytuple:" "vatuple argument expected" vatup) (vatuple-to-tytuple-aux vatup))) (define (vatuple-to-tytuple-aux vatup) (if (null? (cdr vatup)) NULL_tytup (if (null? (cddr vatup)) (type-to-tytuple (var-to-type (cadr vatup))) (tytuple-append (vatuple-to-tytuple-aux (cadr vatup)) (vatuple-to-tytuple-aux (cddr vatup)) "vatuple-to-tytuple" )))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; DATATYPE tmtuple -- a binary tree with nodes labeled by terms ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define NULL_tmtup (cons 'tmtuple '())) (define (term-to-tmtuple tm) (if (not-DIA-term? tm) (myerror "term-to-tmtuple:" "term argument expected" tm) (if (NULL-typ? (term-to-type tm)) NULL_tmtup (cons 'tmtuple (cons tm '()))))) (define (term-to-zero-tmtuple tm) (if (not-DIA-term? tm) (myerror "term-to-zero-tmtuple:" "term argument expected" tm) (if (NULL-typ? (term-to-type tm)) NULL_tmtup (cons 'tmtuple (cons (term-to-zero tm) '()))))) (define (tmtupleSIMP? tmtup) (if (null? tmtup) #f (if (not-pair? tmtup) #f (if (not-eq? (car tmtup) 'tmtuple) #f (if (null? (cdr tmtup)) #t (if (not-pair? (cdr tmtup)) #f (if (null? (cddr tmtup)) (term? (cadr tmtup)) (and (tmtupleSIMP? (cadr tmtup)) (tmtupleSIMP? (cddr tmtup)))))))))) (define (ntmtupleSIMP? tmtup) (if (tmtupleSIMP? tmtup) #f #t)) (define (tmtupleDEEP? tmtup) (if (null? tmtup) #f (if (not-pair? tmtup) #f (if (not-eq? (car tmtup) 'tmtuple) #f (if (null? (cdr tmtup)) #t (if (not-pair? (cdr tmtup)) #f (if (null? (cddr tmtup)) (term-DEEP? (cadr tmtup)) (and (tmtupleDEEP? (cadr tmtup)) (tmtupleDEEP? (cddr tmtup)))))))))) (define (ntmtupleDEEP? tmtup) (if (tmtupleDEEP? tmtup) #f #t)) (define (tmtuple? tmtup) (if CHECK (if PARANOIA (tmtupleDEEP? tmtup) (tmtupleSIMP? tmtup)) #t)) (define (not-tmtuple? tmtup) (if (tmtuple? tmtup) #f #t)) (define (DIA-tmtuple-assoc tmtup) (if (not-tmtuple? tmtup) (myerror "DIA-tmtuple-assoc: argument" "must be a tmtuple" tmtup) (if (null? (cdr tmtup)) (myerror "DIA-tmtuple-assoc: argument" "cannot be NULL_tmtup") (if (null? (cddr tmtup)) (myerror "DIA-tmtuple-assoc: argument" "cannot be SINGLETON tmtuple") (let((pair (cadr tmtup)) (C (cddr tmtup))) (if (null? (cdr pair)) (myerror "DIA-tmtuple-assoc: left of" "argument cannot be NULL_tmtup") (if (null? (cddr pair)) (myerror "DIA-tmtuple-assoc: left of argument" "cannot be SINGLETON tmtuple") (let((A (cadr pair)) (B (cddr pair))) (tmtuple-append A (tmtuple-append B C "DIA-tmtuple-assoc 1") "DIA-tmtuple-assoc 2"))))))))) (define (tmtuple-left tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-left: argument" "must be a tmtuple" tmtup) (if (null? (cdr tmtup)) (myerror "tmtuple-left: argument" "cannot be NULL_tmtup" tmtup) (if (null? 
(cddr tmtup)) (myerror "tmtuple-left: argument" "cannot be singleton tmtuple" tmtup) (cadr tmtup))))) (define (tmtuple-right tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-right: argument" "must be a tmtuple" tmtup) (if (null? (cdr tmtup)) (myerror "tmtuple-right: argument" "cannot be NULL_tmtup" tmtup) (if (null? (cddr tmtup)) (myerror "tmtuple-right: argument" "cannot be singleton tmtuple" tmtup) (cddr tmtup))))) ; VARIANT of 17 July 2005 ; (define (tmtuple-left tmtup) ; (if (not-tmtuple? tmtup) ; (myerror "tmtuple-left: argument" ; "must be a tmtuple" tmtup) ; (if (null? (cdr tmtup)) ; NULL_tmtup ; (if (null? (cddr tmtup)) ; (myerror ; "tmtuple-left: argument" ; "cannot be singleton tmtuple" ; tmtup) ; (cadr tmtup))))) ; (define (tmtuple-right tmtup) ; (if (not-tmtuple? tmtup) ; (myerror "tmtuple-right: argument" ; "must be a tmtuple" tmtup) ; (if (null? (cdr tmtup)) ; NULL_tmtup ; (if (null? (cddr tmtup)) ; (myerror ; "tmtuple-right: argument" ; "cannot be singleton tmtuple" ; tmtup) ; (cddr tmtup))))) (define (tmtuple-to-term tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-to-term: argument" "must be a tmtuple" tmtup) (if (null? (cdr tmtup)) (myerror "tmtuple-to-term: argument" "cannot be NULL_tmtup" tmtup) (if (not-null? (cddr tmtup)) (myerror "tmtuple-to-term: argument" "must be singleton tmtuple" tmtup) (cadr tmtup))))) (define (vatuple-to-var vatup) (if (not-vatuple? vatup) (myerror "vatuple-to-var: argument" "must be a vatuple" vatup) (if (null? (cdr vatup)) (myerror "vatuple-to-var: argument" "cannot be NULL_vatup" vatup) (if (not-null? (cddr vatup)) (myerror "vatuple-to-var: argument" "must be singleton vatuple" vatup) (cadr vatup))))) (define (tmtuple-append tm1 tm2 err) (if (not-tmtuple? tm1) (myerror "tmtuple-append:" "first argument must be a tmtuple" tm1 tm2 err) (if (not-tmtuple? tm2) (myerror "tmtuple-append:" "second argument must be a tmtuple" tm1 tm2 err) (tmtuple-append-SIMP tm1 tm2)))) (define (tmtuple-append-SIMP tm1 tm2) (cons 'tmtuple (cons tm1 tm2))) ; (define (tmtuple-append-SIMP tm1 tm2) ; (if (null? (cdr tm1)) tm2 ; (if (null? (cdr tm2)) tm1 ; (cons 'tmtuple (cons tm1 tm2))))) (define (tytuple-to-tmtuple tytup) (if (not-tytuple? tytup) (myerror "tytuple-to-tmtuple:" "tytuple argument expected" tytup) (tytuple-to-tmtuple-aux tytup))) (define (tytuple-to-tmtuple-aux tytup) (if (null? (cdr tytup)) NULL_tmtup (if (null? (cddr tytup)) (term-to-tmtuple (make-term-in-var-form (type-to-new-var (cadr tytup)))) (tmtuple-append (tytuple-to-tmtuple-aux (cadr tytup)) (tytuple-to-tmtuple-aux (cddr tytup)) "tytuple-to-tmtuple" )))) (define (vatuple-to-tmtuple vatup) (if (not-vatuple? vatup) (myerror "vatuple-to-tmtuple:" "vatuple argument expected" vatup) (vatuple-to-tmtuple-aux vatup))) (define (vatuple-to-tmtuple-aux vatup) (if (null? (cdr vatup)) NULL_tmtup (if (null? (cddr vatup)) (term-to-tmtuple (make-term-in-var-form (cadr vatup))) (tmtuple-append (vatuple-to-tmtuple-aux (cadr vatup)) (vatuple-to-tmtuple-aux (cddr vatup)) "vatuple-to-tmtuple" )))) (define (vatuple-to-zero-tmtuple vatup) (if (not-vatuple? vatup) (myerror "vatuple-to-zero-tmtuple:" "vatuple argument expected" vatup) (vatuple-to-zero-tmtuple-aux vatup))) (define (vatuple-to-zero-tmtuple-aux vatup) (if (null? (cdr vatup)) NULL_tmtup (if (null? (cddr vatup)) (term-to-tmtuple (var-to-zero (cadr vatup))) (tmtuple-append (vatuple-to-zero-tmtuple-aux (cadr vatup)) (vatuple-to-zero-tmtuple-aux (cddr vatup)) "vatuple-to-zero-tmtuple" )))) (define (tytuple-to-ZERO tytup) (if (not-tytuple? 
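;; tmtuple-to-term and vatuple-to-var are the inverses of term-to-tmtuple
;; and var-to-vatuple and insist on singleton arguments; the commented-out
;; "VARIANT of 17 July 2005" of tmtuple-left/right above instead tolerated
;; the empty tuple (returning NULL_tmtup).  vatuple-to-tmtuple wraps every
;; variable of a vatuple into a term via make-term-in-var-form, while
;; vatuple-to-zero-tmtuple replaces each variable by the canonical zero of
;; its type.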
tytup) (myerror "tytuple-to-ZERO:" "tytuple argument expected" tytup) (tytuple-to-ZERO-aux tytup))) (define (tytuple-to-ZERO-aux tytup) (if (null? (cdr tytup)) NULL_tmtup (if (null? (cddr tytup)) (term-to-tmtuple (type-to-zero (cadr tytup))) (tmtuple-append (tytuple-to-ZERO-aux (cadr tytup)) (tytuple-to-ZERO-aux (cddr tytup)) "tytuple-to-ZERO" )))) (define (tmtuple-to-tytuple tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-to-tytuple:" "tmtuple argument expected" tmtup) (tmtuple-to-tytuple-aux tmtup))) (define (tmtuple-to-tytuple-aux tmtup) (if (null? (cdr tmtup)) NULL_tytup (if (null? (cddr tmtup)) (type-to-tytuple (term-to-type (cadr tmtup))) (tytuple-append (tmtuple-to-tytuple-aux (cadr tmtup)) (tmtuple-to-tytuple-aux (cddr tmtup)) "tmtuple-to-tytuple" )))) (define (tmtuple-substitute tmtup subst) (if (not-tmtuple? tmtup) (myerror "tmtuple-substitute:" "tmtuple argument expected" tmtup subst) (if (null? subst) tmtup (tmtuple-substitute-aux tmtup subst)))) (define (tmtuple-substitute-aux tmtup subst) (if (null? (cdr tmtup)) NULL_tmtup (if (null? (cddr tmtup)) (term-to-tmtuple (term-substitute (cadr tmtup) subst)) (tmtuple-append (tmtuple-substitute-aux (cadr tmtup) subst) (tmtuple-substitute-aux (cddr tmtup) subst) "tmtuple-substitute" )))) (define (DIA-tmtuple-non-simult-subst tmtup alst) (if (not-tmtuple? tmtup) (myerror "DIA-tmtuple-non-simult-subst:" "tmtuple argument expected" tmtup alst) (if (null? alst) tmtup (DIA-tmtuple-non-simult-subst-aux tmtup alst)))) (define (DIA-tmtuple-non-simult-subst-aux tmtup alst) (if (null? (cdr tmtup)) NULL_tmtup (if (null? (cddr tmtup)) (term-to-tmtuple (DIA-term-non-simult-subst (cadr tmtup) alst)) (tmtuple-append (DIA-tmtuple-non-simult-subst-aux (cadr tmtup) alst) (DIA-tmtuple-non-simult-subst-aux (cddr tmtup) alst) "DIA-tmtuple-non-simult-subst" )))) (define (DIA-term-non-simult-subst tm alst) (if (null? alst) tm (let*((free (term-to-free tm)) (new-alst (DIA-select-alist free alst))) (DIA-term-non-simult-subst-aux tm new-alst)))) (define (DIA-term-non-simult-subst-aux tm alst) (if (null? alst) tm (make-term-in-app-form (make-term-in-abst-form (caar alst) (DIA-term-non-simult-subst-aux tm (cdr alst))) (cdar alst)))) (define (DIA-select-alist valst alst) (if (null? alst) '() (let*((head (car alst)) (var (car head)) (rec-alst (DIA-select-alist valst (cdr alst)))) (if (or (notelem? var valst) (elem-alist? var rec-alst)) rec-alst (cons head rec-alst))))) (define (DIA-make-ZERO-alist valst) (if (not-valist? valst) (myerror "DIA-make-ZERO-alist:" "argument must be valist" valst) (DIA-make-ZERO-alist-aux valst))) (define (DIA-make-ZERO-alist-aux valst) (if (null? valst) '() (cons (cons (car valst) (var-to-zero (car valst))) (DIA-make-ZERO-alist-aux (cdr valst))))) (define (DIA-non-simult-alist-to-string alst) (if (null? alst) "[ EMPTY-ALIST ]" (string-append " [ " SBK3 (DIA-non-simult-alist-to-string-aux alst)))) (define (DIA-non-simult-alist-to-string-aux alst) (if (null? alst) " ]" (string-append " , " (var-to-string (caar alst)) " -> " (term-to-string (cdar alst)) (DIA-non-simult-alist-to-string-aux (cdr alst))))) (define (tmtuple-to-free tmtup) (if (not-tmtuple? tmtup) (myerror "tmtuple-to-free:" "tmtuple argument expected" tmtup) (tmtuple-to-free-aux tmtup))) (define (tmtuple-to-free-aux tmtup) (if (null? (cdr tmtup)) '() (if (null? (cddr tmtup)) (term-to-free (cadr tmtup)) (union (tmtuple-to-free-aux (cadr tmtup)) (tmtuple-to-free-aux (cddr tmtup)))))) (define (DIA-star-to-tmtuple tm tytup) (if (not-DIA-term? 
tm) (myerror "DIA-star-to-tmtuple:" "1st argument must be term " tm) (if (not-tytuple? tytup) (myerror "DIA-star-to-tmtuple:" "2nd argument must be tytuple" tytup) (let((LEGDUM (if DEBUG-STAR (DIA-comment-forced "DIA-star-to-tmtuple BEGIN :" 'CNL (term-to-string tm) 'CNL (tytuple-to-string tytup)))) (rv (DIA-star-to-tmtuple-aux tm tytup))) (begin (if DEBUG-STAR (DIA-comment-forced "DIA-star-to-tmtuple RESULT is" 'CNL (tmtuple-to-string rv))) rv))))) (define (DIA-star-to-tmtuple-aux tm tytup) (if (NULL-tytup? tytup) (if (NULL-tm? tm) (tytuple-to-tmtuple tytup) (myerror "DIA-star-to-tmtuple:" "NULL_tm expected")) (if (null? (cddr tytup)) (if (equal? (cadr tytup) (term-to-type tm)) (term-to-tmtuple tm) (myerror "DIA-star-to-tmtuple:" "type of term" (term-to-string tm) "should be the type in" (tytuple-to-string tytup))) (let*((left-tm (DIA-star-to-left tm tytup)) (LEGDUM (if DEBUG-STAR (DIA-comment-forced "DIA-star-to-tmtuple:" "left-tm is " 'CNL (term-to-string left-tm)))) (right-tm (DIA-star-to-right tm tytup)) (LEGDUM (if DEBUG-STAR (DIA-comment-forced "DIA-star-to-tmtuple:" "right-tm is " 'CNL (term-to-string right-tm))))) (tmtuple-append (DIA-star-to-tmtuple-aux left-tm (cadr tytup)) (DIA-star-to-tmtuple-aux right-tm (cddr tytup)) "DIA-star-to-tmtuple"))))) (define (DIA-star-to-left tm tytup) (begin (if DEBUG-STAR (DIA-comment-forced "DIA-star-to-left:" 'CNL (term-to-string tm) 'CNL (tytuple-to-string tytup))) (if (NULL-tytup? (cadr tytup)) NULL_tm (if (NULL-tytup? (cddr tytup)) tm (if (star-form? (term-to-type tm)) (if (term-in-pair-form? tm) (term-in-pair-form-to-left tm) (make-term-in-lcomp-form tm)) (myerror "DIA-star-to-left: star-form" "required for type of " (term-to-string tm))))))) (define (DIA-star-to-right tm tytup) (begin (if DEBUG-STAR (DIA-comment-forced "DIA-star-to-right:" 'CNL (term-to-string tm) 'CNL (tytuple-to-string tytup))) (if (NULL-tytup? (cddr tytup)) NULL_tm (if (NULL-tytup? (cadr tytup)) tm (if (star-form? (term-to-type tm)) (if (term-in-pair-form? tm) (term-in-pair-form-to-right tm) (make-term-in-rcomp-form tm)) (myerror "DIA-star-to-right: star-form" "required for type of " (term-to-string tm))))))) (define (DIA-tmtuple-to-star tmtup) (if (not-tmtuple? tmtup) (myerror "DIA-tmtuple-to-star:" "tmtuple argument expected" tmtup) (DIA-tmtuple-to-star-aux tmtup))) (define (DIA-tmtuple-to-star-aux tmtup) (if (null? (cdr tmtup)) NULL_tm (if (null? (cddr tmtup)) (cadr tmtup) (DIA-make-term-in-star-form (DIA-tmtuple-to-star-aux (cadr tmtup)) (DIA-tmtuple-to-star-aux (cddr tmtup)))))) (define (DIA-make-term-in-star-form tm1 tm2) (if (NULL-tm? tm1) tm2 (if (NULL-tm? tm2) tm1 (make-term-in-pair-form tm1 tm2)))) (define (make-tmtuple-in-abst-form vatup tmtup) (if (not-vatuple? vatup) (myerror "make-tmtuple-in-abst-form:" "first argument must be a vatuple" vatup tmtup) (if (not-tmtuple? tmtup) (myerror "make-tmtuple-in-abst-form:" "second argument must be a tmtuple" vatup tmtup) (make-tmtuple-in-abst-form-aux vatup tmtup)))) (define (make-tmtuple-in-abst-form-aux vatup tmtup) (if (null? (cdr tmtup)) NULL_tmtup (if (null? (cddr tmtup)) (term-to-tmtuple (make-tmtuple-in-abst-form-aux-aux vatup (cadr tmtup))) (tmtuple-append (make-tmtuple-in-abst-form-aux vatup (cadr tmtup)) (make-tmtuple-in-abst-form-aux vatup (cddr tmtup)) "make-tmtuple-in-abst-form" )))) (define (make-tmtuple-in-abst-form-aux-aux vatup tm) (if (null? (cdr vatup)) tm (if (null? 
(cddr vatup)) (make-term-in-abst-form (cadr vatup) tm) (make-tmtuple-in-abst-form-aux-aux (cadr vatup) (make-tmtuple-in-abst-form-aux-aux (cddr vatup) tm))))) (define (make-tmtuple-in-paral-app-form tmtupOP tmtupARG) (if (not-tmtuple? tmtupOP) (myerror "make-tmtuple-in-paral-app-form:" "first argument must be a tmtuple" tmtupOP tmtupARG) (if (not-tmtuple? tmtupARG) (myerror "make-tmtuple-in-paral-app-form:" "second argument must be a tmtuple" tmtupOP tmtupARG) (make-tmtuple-in-paral-app-form-aux tmtupOP tmtupARG)))) (define (make-tmtuple-in-paral-app-form-aux tmtupOP tmtupARG) (if (null? (cdr tmtupOP)) NULL_tmtup (if (null? (cddr tmtupOP)) (term-to-tmtuple (make-term-in-app-form (cadr tmtupOP) (cadr tmtupARG))) (tmtuple-append (make-tmtuple-in-paral-app-form-aux (cadr tmtupOP) (cadr tmtupARG)) (make-tmtuple-in-paral-app-form-aux (cddr tmtupOP) (cddr tmtupARG)) "make-tmtuple-in-paral-app-form" )))) (define (make-tmtuple-in-app-form tmtupOP tmtupARG) (if (not-tmtuple? tmtupOP) (myerror "make-tmtuple-in-app-form:" "first argument must be a tmtuple" tmtupOP tmtupARG) (if (not-tmtuple? tmtupARG) (myerror "make-tmtuple-in-app-form:" "second argument must be a tmtuple" tmtupOP tmtupARG) (make-tmtuple-in-app-form-aux tmtupOP tmtupARG)))) (define (make-tmtuple-in-app-form-aux tmtupOP tmtupARG) (if (null? (cdr tmtupOP)) NULL_tmtup (if (null? (cddr tmtupOP)) (term-to-tmtuple (make-tmtuple-in-app-form-aux-aux (cadr tmtupOP) tmtupARG)) (tmtuple-append (make-tmtuple-in-app-form-aux (cadr tmtupOP) tmtupARG) (make-tmtuple-in-app-form-aux (cddr tmtupOP) tmtupARG) "make-tmtuple-in-app-form" )))) (define (make-tmtuple-in-app-form-aux-aux tm tmtup) (if (null? (cdr tmtup)) tm (if (null? (cddr tmtup)) (make-term-in-app-form tm (cadr tmtup)) (make-tmtuple-in-app-form-aux-aux (make-tmtuple-in-app-form-aux-aux tm (cadr tmtup)) (cddr tmtup))))) (define (nbe-normalize-tmtuple tmtup) (if (not-tmtuple? tmtup) (myerror "nbe-normalize-tmtuple:" "tmtuple argument expected" tmtup) (nbe-normalize-tmtuple-aux tmtup))) (define (nbe-normalize-tmtuple-aux tmtup) (if (null? (cdr tmtup)) NULL_tmtup (if (null? (cddr tmtup)) (term-to-tmtuple (nbe-normalize-term (cadr tmtup))) (tmtuple-append (nbe-normalize-tmtuple-aux (cadr tmtup)) (nbe-normalize-tmtuple-aux (cddr tmtup)) "nbe-normalize-tmtuple" )))) (define (make-tmtuple-in-if-form tm tmtup1 tmtup2) (begin (if PARANOIA (if (not-DIA-term? tm) (myerror "make-tmtuple-in-if-form:" "1st argument must be term" tm) (if (not (alg-form? (term-to-type tm))) (myerror "make-tmtuple-in-if-form:" "1st argument must be" "boolean term" tm) (if (not (string=? "boole" (alg-form-to-name (term-to-type tm)))) (myerror "make-tmtuple-in-if-form:" "1st argument must be" "boolean term" tm) (if (not-tmtuple? tmtup1) (myerror "make-tmtuple-in-if-form:" "2nd argument must be tmtuple" tmtup1) (if (not-tmtuple? tmtup2) (myerror "make-tmtuple-in-if-form:" "3rd argument must be tmtuple" tmtup2))))))) (make-tmtuple-in-if-form-aux tm tmtup1 tmtup2))) (define (make-tmtuple-in-if-form-aux tm tmtup1 tmtup2) (if (null? (cdr tmtup1)) (if (not-null? (cdr tmtup2)) (myerror "make-tmtuple-in-if-form: 2nd and" "3rd argument must be isomorphic" tmtup1 tmtup2) NULL_tmtup) (if (null? (cddr tmtup1)) (if (null? (cdr tmtup2)) (myerror "make-tmtuple-in-if-form: 2nd and" "3rd argument must be isomorphic" tmtup1 tmtup2) (if (not-null? 
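;; Two application schemes on tmtuples are provided:
;; make-tmtuple-in-paral-app-form zips two tuples of the same shape
;; componentwise, whereas make-tmtuple-in-app-form applies each component
;; of the operator tuple to all components of the argument tuple, curried
;; left to right.  Sketch with leaves f,g and a,b:
;;   OP = {f,g}, ARG = {a,b}
;;   paral-app ==> { f a , g b }          app ==> { f a b , g a b }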
(cddr tmtup2)) (myerror "make-tmtuple-in-if-form: 2nd and" "3rd argument must be isomorphic" tmtup1 tmtup2) (term-to-tmtuple (make-term-in-if-form tm (list (cadr tmtup1) (cadr tmtup2)))))) (tmtuple-append (make-tmtuple-in-if-form-aux tm (cadr tmtup1) (cadr tmtup2)) (make-tmtuple-in-if-form-aux tm (cddr tmtup1) (cddr tmtup2)) "make-tmtuple-in-if-form")))) ;; The following procedure creates substitutions from isomorphic ;; vatuple and tmtuple arguments (define (DIA-make-substitution vatup tmtup err) (if (not-vatuple? vatup) (myerror err "DIA-make-substitution:" "1st argument must be vatuple" vatup) (if (not-tmtuple? tmtup) (myerror err "DIA-make-substitution:" "2nd argument must be tmtuple" tmtup) (DIA-make-substitution-aux vatup tmtup err)))) (define (DIA-make-substitution-aux vatup tmtup err) (if (null? (cdr vatup)) (if (not-null? (cdr tmtup)) (myerror err "DIA-make-substitution-aux:" "1st and 2nd argument must be isomorphic") empty-subst) (if (null? (cddr vatup)) (if (not-null? (cddr tmtup)) (myerror err "DIA-make-substitution-aux:" "1st and 2nd argument must be isomorphic") (make-subst (cadr vatup) (cadr tmtup))) (append (DIA-make-substitution-aux (cadr vatup) (cadr tmtup) err) (DIA-make-substitution-aux (cddr vatup) (cddr tmtup) err))))) ;; The following procedure creates alists of associations of ;; terms to vars to be used for non-simultaneous substitutions ;; The arguments must be isomorphic vatuple and tmtuple (define (DIA-make-alist vatup tmtup err) (if (not-vatuple? vatup) (myerror err "DIA-make-alist:" "1st argument must be vatuple" vatup) (if (not-tmtuple? tmtup) (myerror err "DIA-make-alist:" "2nd argument must be tmtuple" tmtup) (DIA-make-alist-aux vatup tmtup err)))) (define (DIA-make-alist-aux vatup tmtup err) (if (null? (cdr vatup)) (if (not-null? (cdr tmtup)) (myerror err "DIA-make-alist-aux: 1st and 2nd" "argument must be isomorphic") '()) (if (null? (cddr vatup)) (if (not-null? (cddr tmtup)) (myerror err "DIA-make-alist-aux: 1st and 2nd" "argument must be isomorphic") (list (cons (cadr vatup) (cadr tmtup)))) (append (DIA-make-alist-aux (cadr vatup) (cadr tmtup) err) (DIA-make-alist-aux (cddr vatup) (cddr tmtup) err))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; DATATYPE tmtuplist == a simple list of tmtuples ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (tmtuplist? tupli) (if CHECK (if (not-list? tupli) #f (tmtuplist-aux? tupli)) #t)) (define (tmtuplist-aux? tupli) (if (null? tupli) #t (if (not-tmtuple? (car tupli)) #f (tmtuplist-aux? (cdr tupli))))) (define (not-tmtuplist? tupli) (if (tmtuplist? tupli) #f #t)) (define (make-tmtuplist-in-app-form tmtuplst tmtup) (if (not-tmtuple? tmtup) (myerror "make-tmtuplist-in-app-form:" "2nd argument must be tmtuple" tmtup) (if (not-tmtuplist? tmtuplst) (myerror "make-tmtuplist-in-app-form:" "1st argument must be tmtuplist" tmtuplst) (make-tmtuplist-in-app-form-aux tmtuplst tmtup)))) (define (make-tmtuplist-in-app-form-aux tmtuplst tmtup) (if (null? tmtuplst) (list) (cons (make-tmtuple-in-app-form-aux (car tmtuplst) tmtup) (make-tmtuplist-in-app-form-aux (cdr tmtuplst) tmtup)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; DATATYPE tmtuplealist == a list of associations ;;; of tmtuples to assumption variables ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (tmtuplealist? tmtupali) (if CHECK (if (not-list? tmtupali) #f (tmtuplealist-aux? tmtupali)) #t)) (define (tmtuplealist-aux? 
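;; DIA-make-substitution and DIA-make-alist both require their vatuple and
;; tmtuple arguments to be isomorphic (same tree shape); the former builds
;; a Minlog substitution from make-subst/empty-subst, the latter a plain
;; association list of (var . term) pairs for the non-simultaneous
;; substitution above.  A "tmtuplealist" is a list of pairs
;; (avar . tmtuple), associating a term tuple to each assumption variable.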
tmtupali) (if (null? tmtupali) #t (if (not-tmtuple? (cdar tmtupali)) #f (if (not-avar? (caar tmtupali)) #f (tmtuplealist-aux? (cdr tmtupali)))))) (define (ntmtuplealist? tmtupali) (if (tmtuplealist? tmtupali) #f #t)) (define NULL_tmtupalst (list)) (define (make-tmtuplealist-in-abst-form vatpl tmtplalst) (if (not-vatuple? vatpl) (myerror "make-tmtuplealist-in-abst-form:" "1st argument must be vatuple") (if (ntmtuplealist? tmtplalst) (myerror "make-tmtuplealist-in-abst-form: 2nd" "argument must be tmtuple-alist") (make-tmtuplealist-in-abst-form-aux vatpl tmtplalst)))) (define (make-tmtuplealist-in-abst-form-aux vatpl tmtplalst) (if (null? tmtplalst) tmtplalst (cons (cons (caar tmtplalst) (make-tmtuple-in-abst-form vatpl (cdar tmtplalst))) (make-tmtuplealist-in-abst-form-aux vatpl (cdr tmtplalst))))) (define (make-tmtuplealist-in-app-form tmtplalst tmtpl) (if (not-tmtuple? tmtpl) (myerror "make-tmtuplealist-in-app-form:" "2nd argument must be tmtuple") (if (ntmtuplealist? tmtplalst) (myerror "make-tmtuplealist-in-app-form: 1st" "argument must be tmtuple-alist") (make-tmtuplealist-in-app-form-aux tmtplalst tmtpl)))) (define (make-tmtuplealist-in-app-form-aux tmtplalst tmtpl) (if (null? tmtplalst) tmtplalst (cons (cons (caar tmtplalst) (make-tmtuple-in-app-form (cdar tmtplalst) tmtpl)) (make-tmtuplealist-in-app-form-aux (cdr tmtplalst) tmtpl)))) (define (alist-to-formula-free tmtplalst) (if (null? tmtplalst) (list) (union (formula-to-free (avar-to-formula (caar tmtplalst))) (alist-to-formula-free (cdr tmtplalst))))) (define (alist-to-tmtuple-free tmtplalst) (if (null? tmtplalst) (list) (union (tmtuple-to-free (cdar tmtplalst)) (alist-to-tmtuple-free (cdr tmtplalst))))) (define (tmtuplealist-substitute tmtplalst subst) (if (ntmtuplealist? tmtplalst) (myerror "tmtuplealist-substitute:" "1st argument must be tmtuplealist") (tmtuplealist-substitute-aux tmtplalst subst))) (define (tmtuplealist-substitute-aux tmtplalst subst) (if (null? subst) tmtplalst (if (null? tmtplalst) tmtplalst (cons (cons (caar tmtplalst) (tmtuple-substitute (cdar tmtplalst) subst)) (tmtuplealist-substitute-aux (cdr tmtplalst) subst))))) (define (tmtuplealist-non-simult-subst tmtplalst subst) (if (ntmtuplealist? tmtplalst) (myerror "tmtuplealist-substitute:" "1st argument must be tmtuplealist") (tmtuplealist-non-simult-subst-aux tmtplalst subst))) (define (tmtuplealist-non-simult-subst-aux tmtplalst subst) (if (null? subst) tmtplalst (if (null? tmtplalst) tmtplalst (cons (cons (caar tmtplalst) (DIA-tmtuple-non-simult-subst (cdar tmtplalst) subst)) (tmtuplealist-non-simult-subst-aux (cdr tmtplalst) subst))))) ; DATATYPE typair == pair of tytuples (define (typair-left typair) (cadr typair)) (define (typair-right typair) (caddr typair)) (define (typair? t) (if (not-eq? 'typair (car t)) #f (if (not-tytuple? (cadr t)) #f (tytuple? (caddr t))))) (define (ntypair? t) (if (typair? t) #f #t)) (define (make-typair tytpl1 tytpl2) (begin (if PARANOIA (if (not-tytuple? tytpl1) (myerror "make-typair: 1st" "argument not a tytuple" tytpl1) (if (not-tytuple? tytpl2) (myerror "make-typair: 2nd" "argument not a tytuple" tytpl2)))) (list 'typair tytpl1 tytpl2))) (define NULL_typr (make-typair NULL_tytup NULL_tytup)) ; DATATYPE vapair == pair of vatuples (define (vapair-left vapr) (cadr vapr)) (define (vapair-right vapr)(caddr vapr)) (define (vapair? vapr) (if (not-eq? 'vapair (car vapr)) #f (if (not-vatuple? (cadr vapr)) #f (vatuple? (caddr vapr))))) (define (nvapair? t) (if (vapair? 
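;; typairs and vapairs are tagged triples, e.g. (typair <tytuple> <tytuple>),
;; accessed by typair-left/typair-right (cadr/caddr); they hold the
;; existential and universal halves (in that order) of the Dialectica
;; interpretation of a formula.  typair-to-vapair picks fresh variables
;; for both halves of a typair.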
t) #f #t)) (define (make-vapair vatpl1 vatpl2) (begin (if PARANOIA (if (not-vatuple? vatpl1) (myerror "make-vapair: 1st" "argument not a vatuple" vatpl1) (if (not-vatuple? vatpl2) (myerror "make-vapair: 2nd" "argument not a vatuple" vatpl2)))) (list 'vapair vatpl1 vatpl2))) (define NULL_vapr (make-vapair NULL_vatup NULL_vatup)) (define (typair-to-vapair typr) (begin (if PARANOIA (if (ntypair? typr) (myerror "typair-to-vapair:" "argument not a typair" typr))) (let*((vatup-le (tytuple-to-vatuple (typair-left typr))) (vatup-ri (tytuple-to-vatuple (typair-right typr)))) (make-vapair vatup-le vatup-ri)))) ; DATATYPE tmpair == pair with left component tmtuple and ; right component a tmtuple alist (define (tmpair-to-tuple tmpair) (cadr tmpair)) (define (tmpair-to-alist tmpair) (caddr tmpair)) (define (tmpair? t) (if (not-eq? 'tmpair (car t)) #f (if (not-tmtuple? (cadr t)) #f (tmtuplealist? (caddr t))))) (define (ntmpair? t) (if (tmpair? t) #f #t)) (define (make-tmpair tmtpl tmtplalst) (if (not-tmtuple? tmtpl) (myerror "make-tmpair: 1st" "argument must be tmtuple" tmtpl) (if (ntmtuplealist? tmtplalst) (myerror "make-tmpair: 2nd" "argument must be tmtuplealist") (list 'tmpair tmtpl tmtplalst)))) ; DATATYPE vatmpair == ; pair of first component vapair and second component tmpair (define (make-vatmpair vapr tmpair) (list 'vatmpair vapr tmpair)) (define (vatmpair-to-vapair vatmpr) (cadr vatmpr)) (define (vatmpair-to-tmpair vatmpr) (caddr vatmpr)) (define NULLvatmpair (make-vatmpair (make-vapair NULL_vatup NULL_vatup) (make-tmpair NULL_tmtup NULL_tmtupalst))) (define (vatmpair? vmp) (if (not-eq? 'vatmpair (car t)) #f (if (nvapair? (cadr t)) #f (tmpair? (caddr t))))) (define (nvatmpair? t) (if (vatmpair? t) #f #t)) ; In DIA-pvar-to-tvarp we assign a pair of tytuples to the predicate ; variables. For to be able to later refer to this assignment, we use a ; global variable DIA-PVAR-TO-TVARP-ALIST, which memorizes the assigment done ; so far. Later reference is necessary, because such tvars will appear ; in extracted programs of theorems involving pvars, and in a given ; development there may be many auxiliary lemmata containing the same ; pvar. A fixed DIA-pvar-to-tvarp refers to and updates DIA-PVAR-TO-TVARP-ALIST. (define DIA-PVAR-TO-TVARP-ALIST '()) (define DIA-INIT-PVAR-TO-TVARP-ALIST DIA-PVAR-TO-TVARP-ALIST) (define (DIA-pvar-to-tvarp pvar) (let ((info (assoc pvar DIA-PVAR-TO-TVARP-ALIST))) (if info (cadr info) (let ((DIA-newtvarp (make-typair (type-to-tytuple (new-tvar)) (type-to-tytuple (new-tvar))))) (set! DIA-PVAR-TO-TVARP-ALIST (cons (list pvar DIA-newtvarp) DIA-PVAR-TO-TVARP-ALIST)) DIA-newtvarp)))) ; In DIA-formula-to-typair KIND we assign a typair (i.e., a pair of tytuples) ; to any formula passed as argument such that a new pair of type variables is ; assigned to the predicate variables occuring in the argument formulas ; - assignment carried on via DIA-pvar-to-tvarp. Here as well the assigments ; already done are remembered. The assignments are memorized in the global ; variable DIA-PVAR-TO-TVARP-ALIST. (define (DIA-formula-to-typair KIND fmla) (case (tag fmla) ((atom) NULL_typr) ((predicate) (if (formula=? falsity-log fmla) NULL_typr (let((pred (predicate-form-to-predicate fmla))) (if (predconst-form? pred) NULL_typr (if (pvar-form? pred) (if (= 0 (pvar-to-h-deg pred)) (DIA-pvar-to-tvarp pred) NULL_typr) (if (idpredconst-form? 
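;; DIA-pvar-to-tvarp memoizes, per predicate variable, a pair of fresh
;; type variables (as singleton tytuples) in DIA-PVAR-TO-TVARP-ALIST, so
;; every later occurrence of the same pvar, possibly in another lemma,
;; receives the same Dialectica types.  Caveat: vatmpair? above is
;; declared with parameter vmp but its body tests the name t, so as
;; written it cannot inspect its actual argument.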
pred) (myerror "DIA-formula-to-typair:" "inductive predicates not supported yet" pred) (myerror "DIA-formula-to-typair:" "predicate expected" (formula-to-string fmla) pred))))))) ((imp) (make-arrow-fiet (DIA-formula-to-typair KIND (imp-form-to-premise fmla)) (DIA-formula-to-typair KIND (imp-form-to-conclusion fmla)))) ((and) (make-star-fiet (DIA-formula-to-typair KIND (and-form-to-left fmla)) (DIA-formula-to-typair KIND (and-form-to-right fmla)))) ((all) (make-all-fiet (type-to-tytuple (var-to-type (all-form-to-var fmla))) (DIA-formula-to-typair KIND (all-form-to-kernel fmla)))) ((allnc) (case KIND ((light monot) (DIA-formula-to-typair KIND (allnc-form-to-kernel fmla))) ((pure) (make-all-fiet (type-to-tytuple (var-to-type (allnc-form-to-var fmla))) (DIA-formula-to-typair KIND (allnc-form-to-kernel fmla)))) (else (myerror "DIA-formula-to-typair:" "unknown KIND" KIND)))) ((ex) (make-ex-fiet (type-to-tytuple (var-to-type (ex-form-to-var fmla))) (DIA-formula-to-typair KIND (ex-form-to-kernel fmla)))) ((exca excl) (DIA-formula-to-typair KIND (unfold-formula fmla))) ((exnc) (myerror "DIA-formula-to-typair:" "exnc not implemented")) ((tensor) (myerror "DIA-formula-to-typair:" "tensor not allowed here" (formula-to-string fmla))) ; (make-star-fiet ; (DIA-formula-to-typair KIND ; (tensor-form-to-left fmla)) ; (DIA-formula-to-typair KIND ; (tensor-form-to-right fmla)))) (else (myerror "DIA-formula-to-typair:" "formula expected" fmla)))) (define (make-arrow-fiet typairA typairB) (if (ntypair? typairA) (myerror "make-arrow-fiet:" "1st argument must be typair" typairA typairB) (if (ntypair? typairB) (myerror "make-arrow-fiet:" "2nd argument must be typair" typairA typairB) (make-arrow-fiet-aux typairA typairB)))) (define (make-arrow-fiet-aux typairA typairB) (let*((x (typair-left typairA)) (y (typair-right typairA)) (u (typair-left typairB)) (v (typair-right typairB)) (xv (tytuple-append x v "make-arrow-fiet 1")) (bigY (make-tytuple-arrow xv y)) (bigU (make-tytuple-arrow x u)) (YU (tytuple-append bigY bigU "make-arrow-fiet 2"))) (make-typair YU xv))) (define (make-star-fiet typairA typairB) (if (ntypair? typairA) (myerror "make-star-fiet:" "1st argument must be typair" typairA typairB) (if (ntypair? typairB) (myerror "make-star-fiet:" "2nd argument must be typair" typairA typairB) (make-star-fiet-aux typairA typairB)))) (define (make-star-fiet-aux typairA typairB) (let*((x (typair-left typairA)) (y (typair-right typairA)) (u (typair-left typairB)) (v (typair-right typairB)) (xu (tytuple-append x u "make-star-fiet 1")) (yv (tytuple-append y v "make-star-fiet 2"))) (make-typair xu yv))) (define (make-all-fiet z typairA) (if (not-tytuple? z) (myerror "make-all-fiet:" "1st argument must be tytuple" z typairA) (if (ntypair? typairA) (myerror "make-all-fiet:" "2nd argument must be typair" z typairA) (make-all-fiet-aux z typairA)))) (define (make-all-fiet-aux z typairA) (let*((x (typair-left typairA)) (y (typair-right typairA)) (bigX (make-tytuple-arrow z x)) (zy (tytuple-append z y "make-all-fiet"))) (make-typair bigX zy))) (define (make-ex-fiet z typairA) (if (not-tytuple? z) (myerror "make-ex-fiet:" "1st argument must be tytuple" z typairA) (if (ntypair? 
typairA) (myerror "make-ex-fiet:" "2nd argument must be typair" z typairA) (make-ex-fiet-aux z typairA)))) (define (make-ex-fiet-aux z typairA) (make-typair (tytuple-append z (typair-left typairA) "make-ex-fiet") (typair-right typairA))) ; In DIA-pvar-to-bool-term we assign a boolean term of type ; a1->a2->...->aN->bool to each pvar given as argument, ; where (a1,...,aN) is the arity of the predicate variable argument. ; This assignment is memorized for each pvar in the global variable ; DIA-PVAR-TO-BT-ALIST at the first-time call of DIA-pvar-to-bool-term ; on that pvar argument and is later referenced at the subsequent ; calls of DIA-pvar-to-bool-term on the same pvar argument (define DIA-PVAR-TO-BT-ALIST '()) (define DIA-INIT-PVAR-TO-BT-ALIST DIA-PVAR-TO-BT-ALIST) (define (DIA-pvar-to-bool-term pvar) (let ((info (assoc pvar DIA-PVAR-TO-BT-ALIST))) (if info (cadr info) (let ((DIA-newBT (pvar-to-new-bool-term pvar))) (set! DIA-PVAR-TO-BT-ALIST (cons (list pvar DIA-newBT) DIA-PVAR-TO-BT-ALIST)) DIA-newBT)))) (define (dyn-mk-arrow x) (if (list? x) (dyn-mk-arrow-aux x) (myerror "dyn-mk-arrow: list argument expected" x))) (define (dyn-mk-arrow-aux x) (if (null? x) (myerror "dyn-mk-arrow: non-empty list argument expected" x) (dyn-mk-arrow-aux-aux x))) (define (dyn-mk-arrow-aux-aux x) (if (null? (cdr x)) (car x) (make-arrow (car x) (dyn-mk-arrow-aux-aux (cdr x))))) (define (dyn-mk-term-in-app-form tm tmlst) (if (list? tmlst) (dyn-mk-term-in-app-form-aux tm tmlst) (myerror "dyn-mk-term-in-app-form: list argument expected" tmlst))) (define (dyn-mk-term-in-app-form-aux tm tmlst) (if (null? tmlst) (myerror "dyn-mk-term-in-app-form: non-empty list argument expected" tmlst) (dyn-mk-term-in-app-form-aux-aux tm tmlst))) (define (dyn-mk-term-in-app-form-aux-aux tm tmlst) (if (null? (cdr tmlst)) (make-term-in-app-form tm (car tmlst)) (dyn-mk-term-in-app-form-aux-aux (make-term-in-app-form tm (car tmlst)) (cdr tmlst)))) (define (dyn-mk-term-in-abst-form tm valst) (if (list? valst) (dyn-mk-term-in-abst-form-aux tm valst) (myerror "dyn-mk-term-in-abst-form: list argument expected" valst))) (define (dyn-mk-term-in-abst-form-aux tm valst) (if (null? valst) (myerror "dyn-mk-term-in-abst-form:" "non-empty list argument expected" valst) (dyn-mk-term-in-abst-form-aux-aux tm valst))) (define (dyn-mk-term-in-abst-form-aux-aux tm valst) (if (null? (cdr valst)) (make-term-in-abst-form (car valst) tm) (make-term-in-abst-form (car valst) (dyn-mk-term-in-abst-form-aux-aux tm (cdr valst))))) (define (pvar-to-new-bool-term pvar) (let* ((name (string-append "pc" (pvar-to-name pvar))) (arity (pvar-to-arity pvar)) (types (arity-to-types arity)) (pcnst (let((info (assoc name PROGRAM-CONSTANTS))) (if info (cadr info) (let*((new-types (append types (list (py "boole")))) (typ (dyn-mk-arrow new-types))) (begin (add-program-constant name typ) (pconst-name-to-pconst name)))))) (tm-pcnst (make-term-in-const-form pcnst)) (vars (map type-to-new-var types)) (varterms (map make-term-in-var-form vars)) (appterm (dyn-mk-term-in-app-form tm-pcnst varterms)) (rv (dyn-mk-term-in-abst-form appterm vars)) (LEGDUM (nldisplay "bool-term for pvar" (pvar-to-name pvar) "is" (term-to-string rv)))) rv)) (define (dyn-union x) (if (list? x) (dyn-union-aux x) (myerror "dyn-union: list argument expected" x))) (define (dyn-union-aux x) (if (null? x) (myerror "dyn-union: non-empty list argument expected" x) (dyn-union-aux-aux x))) (define (dyn-union-aux-aux x) (cond ((null? x) '()) ((list? 
(car x)) (remove-duplicates (append (car x) (dyn-union-aux-aux (cdr x))))) (else (myerror "union: list expected" (car x))))) ;; (DIA-Data KIND A) returns a 4-elements list ;; (,t_A,.free-var-list) with ;; 1st component - a vapair of existential and ;; universal vatuples ;; which correspond to the vatuples of the 2nd component ;; 2nd component - a boolean term t_A built from the free ;; variables of A which are enumerated in the "free-var-list", ;; x and y (i.e., the existential and universal variables) ;; such that: A_D(x;y;a) <-> atom(t_A[x;y;a]) ;; 3rd component - a typair of existential and ;; universal tytuples (define (DIA-Data KIND fmla) (begin (if DEBUG-DIDA (DIA-comment-forced "DIA-Data applied on" 'CNL (formula-to-string fmla))) (case (tag fmla) ((atom) (let*((tA0 (atom-form-to-kernel fmla)) (freeA0 (term-to-free tA0)) (rv (cons NULL_vapr (cons tA0 (cons NULL_typr freeA0))))) rv)) ((predicate) (if (formula=? falsity-log fmla) (let((rv (cons NULL_vapr (cons FALSE_tm (cons NULL_typr '()))))) rv) (let ((pred (predicate-form-to-predicate fmla))) (if (predconst-form? pred) NULL_typr (if (pvar-form? pred) (if (= 0 (pvar-to-h-deg pred)) (myerror "DIA-Data:" "Not yet implemented for predicates with h-deg zero") (let*((args (predicate-form-to-args fmla)) (bool-tm (DIA-pvar-to-bool-term pred)) (tm (dyn-mk-term-in-app-form bool-tm args)) (fvars (dyn-union (map term-to-free args))) (rv (cons NULL_vapr (cons tm (cons NULL_typr fvars))))) rv)) (if (idpredconst-form? pred) (myerror "DIA-Data" "inductive predicates not supported yet" pred) (myerror "DIA-Data:" "predicate expected" pred))))))) ((imp) (let*((A (DIA-Data KIND (imp-form-to-premise fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "imp - A finished"))) (typr_A (caddr A)) (vapr_A (car A)) (tm_A (cadr A)) (fv_A (cdddr A)) (typ_x (typair-left typr_A)) (typ_y (typair-right typr_A)) (var_x (vapair-left vapr_A)) (var_y (vapair-right vapr_A)) (B (DIA-Data KIND (imp-form-to-conclusion fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "imp - B finished"))) (typr_B (caddr B)) (vapr_B (car B)) (tm_B (cadr B)) (fv_B (cdddr B)) (typ_u (typair-left typr_B)) (typ_v (typair-right typr_B)) (var_u (vapair-left vapr_B)) (var_v (vapair-right vapr_B)) (typ_xv (tytuple-append typ_x typ_v "DIA-Data KIND imp 1")) (typ_BigY (make-tytuple-arrow typ_xv typ_y)) (typ_BigU (make-tytuple-arrow typ_x typ_u)) (typ_YU (tytuple-append typ_bigY typ_bigU "DIA-Data imp 2")) (var_BigY (tytuple-to-vatuple typ_BigY)) (var_BigU (tytuple-to-vatuple typ_BigU)) (var_xv (vatuple-append var_x var_v "DIA-Data imp 3")) (var_YU (vatuple-append var_BigY var_BigU "DIA-Data imp 4")) (Yxv (make-tmtuple-in-app-form (vatuple-to-tmtuple var_BigY) (vatuple-to-tmtuple var_xv))) (Ux (make-tmtuple-in-app-form (vatuple-to-tmtuple var_BigU) (vatuple-to-tmtuple var_x))) (new_tm_A (DIA-term-non-simult-subst tm_A (DIA-make-alist var_y Yxv "DIA-Data (imp) y Yxv :"))) (new_tm_B (DIA-term-non-simult-subst tm_B (DIA-make-alist var_u Ux "DIA-Data (imp) u Ux :"))) (tm_AimpB (mk-term-in-if-form new_tm_A (list new_tm_B TRUE_tm))) (fv_AimpB (union fv_A fv_B)) (rv (cons (make-vapair var_YU var_xv) (cons tm_AimpB (cons (make-typair typ_YU typ_xv) fv_AimpB))))) (begin (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "IMP finished " (term-to-string tm_AimpB))) rv))) ((and) (let*((A (DIA-Data KIND (and-form-to-left fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "and - A finished"))) (typr_A (caddr A)) (vapr_A (car A)) (tm_A (cadr A)) (fv_A (cdddr A)) (typ_x (typair-left 
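;; Concretely, DIA-Data returns the nested cons
;;   (vapair . (t_A . (typair . free-var-list)))
;; so its callers take the vapair with car, the boolean term t_A with
;; cadr, the typair with caddr and the free variables with cdddr, as the
;; connective cases of DIA-Data itself do.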
typr_A)) (typ_y (typair-right typr_A)) (var_x (vapair-left vapr_A)) (var_y (vapair-right vapr_A)) (B (DIA-Data KIND (and-form-to-right fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "and - B finished"))) (typr_B (caddr B)) (vapr_B (car B)) (tm_B (cadr B)) (fv_B (cdddr B)) (typ_u (typair-left typr_B)) (typ_v (typair-right typr_B)) (var_u (vapair-left vapr_B)) (var_v (vapair-right vapr_B)) (typ_xu (tytuple-append typ_x typ_u "DIA-Data and 1")) (typ_yv (tytuple-append typ_y typ_v "DIA-Data and 2")) (var_xu (vatuple-append var_x var_u "DIA-Data and 3")) (var_yv (vatuple-append var_y var_v "DIA-Data and 4")) (tm_AandB (mk-term-in-if-form tm_A (list tm_B FALSE_tm))) (fv_AandB (union fv_A fv_B)) (rv (cons (make-vapair var_xu var_yv) (cons tm_AandB (cons (make-typair typ_xu typ_yv) fv_AandB))))) (begin (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "AND finished " (term-to-string tm_AandB))) rv))) ((all) (let*((A (DIA-Data KIND (all-form-to-kernel fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "all - A finished"))) (typr_A (caddr A)) (vapr_A (car A)) (tm_A (cadr A)) (fv_A (cdddr A)) (typ_x (typair-left typr_A)) (typ_y (typair-right typr_A)) (var_x (vapair-left vapr_A)) (var_y (vapair-right vapr_A)) (var_z (var-to-vatuple (all-form-to-var fmla))) (typ_z (vatuple-to-tytuple var_z)) (new_z (tytuple-to-vatuple typ_z)) (typ_zy (tytuple-append typ_z typ_y "DIA-Data all 1")) (var_zy (vatuple-append new_z var_y "DIA-Data all 2")) (typ_BigX (make-tytuple-arrow typ_z typ_x)) (var_BigX (tytuple-to-vatuple typ_BigX)) (Xz (make-tmtuple-in-app-form (vatuple-to-tmtuple var_BigX) (vatuple-to-tmtuple new_z))) (alst (DIA-make-alist (vatuple-append var_x var_z "DIA-Data all 3") (tmtuple-append Xz (vatuple-to-tmtuple new_z) "DIA-Data all 4") "DIA-Data (all): ")) (new_tm (DIA-term-non-simult-subst tm_A alst)) (new_fv (set-minus fv_A var_z)) (rv (cons (make-vapair var_BigX var_zy) (cons new_tm (cons (make-typair typ_BigX typ_zy) new_fv))))) (begin (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "ALL finished " (term-to-string new_tm))) rv))) ((allnc) (case KIND ((light monot) (DIA-Data KIND (allnc-form-to-kernel fmla))) ((pure) (let*((A (DIA-Data KIND (allnc-form-to-kernel fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data" "allnc - A finished"))) (typr_A (caddr A)) (vapr_A (car A)) (tm_A (cadr A)) (fv_A (cdddr A)) (typ_x (typair-left typr_A)) (typ_y (typair-right typr_A)) (var_x (vapair-left vapr_A)) (var_y (vapair-right vapr_A)) (var_z (var-to-vatuple (allnc-form-to-var fmla))) (typ_z (vatuple-to-tytuple var_z)) (new_z (tytuple-to-vatuple typ_z)) (typ_zy (tytuple-append typ_z typ_y "DIA-Data PDI allnc 1")) (var_zy (vatuple-append new_z var_y "DIA-Data PDI allnc 2")) (typ_BigX (make-tytuple-arrow typ_z typ_x)) (var_BigX (tytuple-to-vatuple typ_BigX)) (Xz (make-tmtuple-in-app-form (vatuple-to-tmtuple var_bigX) (vatuple-to-tmtuple new_z))) (new_tm (DIA-term-non-simult-subst tm_A (DIA-make-alist (vatuple-append var_x var_z "DIA-Data PDI allnc 3") (tmtuple-append Xz (vatuple-to-tmtuple new_z) "DIA-Data PDI allnc 4") "DIA-Data PDI (allnc): "))) (new_fv (set-minus fv_A var_z)) (rv (cons (make-vapair var_BigX var_zy) (cons new_tm (cons (make-typair typ_BigX typ_zy) new_fv))))) (begin (if DEBUG-DIDA (DIA-comment-forced "DIA-Data PDI" "ALLNC finished " (term-to-string new_tm))) rv))) (else (myerror "DIA-Data: unknown KIND" KIND)))) ((ex) (let*((A (DIA-Data KIND (ex-form-to-kernel fmla))) (LEGDUM (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "ex - A finished"))) (typr_A (caddr A)) 
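;; Hedged schematic of the quantifier clause treated above: if the kernel
;; A of (all z A) has Dialectica variables (x;y) and kernel term t_A, the
;; code introduces a fresh X of arrow type z=>|x| and returns
;;   existential part: X    universal part: (z,y)    term: t_A[x := X z]
;; i.e. the familiar clause (all z A)_D(X; z,y) = A_D(X z; y).  This is
;; only a reading aid; the implementation is the surrounding code itself.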
(vapr_A (car A)) (tm_A (cadr A)) (fv_A (cdddr A)) (typ_x (typair-left typr_A)) (typ_y (typair-right typr_A)) (var_x (vapair-left vapr_A)) (var_y (vapair-right vapr_A)) (var_z (var-to-vatuple (ex-form-to-var fmla))) (typ_z (vatuple-to-tytuple var_z)) (new_z (tytuple-to-vatuple typ_z)) (typ_zx (tytuple-append typ_z typ_x "DIA-Data ex 1")) (var_zx (vatuple-append new_z var_x "DIA-Data ex 2")) (new_tm (DIA-term-non-simult-subst tm_A (DIA-make-alist var_z (vatuple-to-tmtuple new_z) "DIA-Data (ex): "))) (new_fv (set-minus fv_A var_z)) (rv (cons (make-vapair var_zx var_y) (cons new_tm (cons (make-typair typ_zx typ_y) new_fv))))) (begin (if DEBUG-DIDA (DIA-comment-forced "DIA-Data " "EX finished " (term-to-string new_tm))) rv))) ((exca excl) (DIA-Data KIND (unfold-formula fmla))) ((exnc) (myerror "DIA-Data:" "exnc not implemented" (formula-to-string fmla))) ((tensor) (myerror "DIA-Data:" "tensor not allowed here" (formula-to-string fmla))) (else (myerror "DIA-Data:" "syntactic error in formula" fmla))))) (define (avar-table-count avr alist) (if (null? alist) (list (cons avr 1)) (if (avar=? avr (caar alist)) (cons (cons avr (+ (cdar alist) 1)) (cdr alist)) (cons (car alist) (avar-table-count avr (cdr alist)))))) (define (avar-table avr alist) (cond ((null? alist) #f) ((avar=? avr (caar alist)) (cdar alist)) (else (avar-table avr (cdr alist))))) (define (DIA-Occur-table-to-string alst) (DIA-avar-table-to-string alst number->string)) (define (DIA-avar-table-to-string alst to_string_proc) (if (null? alst) "[ EMPTY-ALIST ]" (string-append " [ " SBK3 (DIA-avar-table-to-string-aux alst to_string_proc)))) (define (DIA-avar-table-to-string-aux alst to_string_proc) (if (null? alst) " ]" (string-append " , " (avar-to-string (caar alst)) " -> " (to_string_proc (cdar alst)) (DIA-avar-table-to-string-aux (cdr alst) to_string_proc)))) ; (DIA-make-avar-to-Data KIND) creates a new list of associations ; of Data to assumption variables. Data is a list with 1st ; element the number of occurences of that assumption variable ; in the argument proof - this is determined in the first ; "optimization" pass (when y is #t). The 2nd element of Data ; is a vapair thus uniquely associated to the assumption var. ; If the assumption var A occurs at least twice in the proof ; then the list contains also an associated term t_A such that ; A_D(x;y;a) <--> t_A(x;y;a), a typair corresponding to the ; vapair and in the end the list of free variables of the ; formula A (define (DIA-make-avar-to-Data KIND) (let((avar-Occur-list '()) (avar-Vapr-list '()) (avar-Data-list '())) (lambda (avr flg) (begin (if (not-avar? avr) (myerror "DIA-make-avar-to-Data: " "avar argument expected")) (if DEBUG-MAVD (nldisplay "DIA-make-avar-to-Data:" "invoked with flag " flg "for avar " (avar-to-string avr))) (if flg (let((new-Occur-list (avar-table-count avr avar-Occur-list))) (begin (set! avar-Occur-list new-Occur-list) new-Occur-list)) (let((LEGDUM (if DEBUG-MAVD (nldisplay "Occur list: " (DIA-Occur-table-to-string avar-Occur-list)))) (info (avar-table avr avar-Occur-list)) (fmla (avar-to-formula avr))) (case info ((#f) (begin (if DEBUG-MAVD (nldisplay "Avar-to-Data:" "detected assumption variable" (avar-to-string avr) "with no occurence in proof." "Formula is \n" (formula-to-string fmla))) (list 0 #f))) ((1) (begin (if DEBUG-MAVD (nldisplay "Avar-to-Data:" "OK, assumption variable" (avar-to-string avr) "with 1 occurence will" "be added to VAPR list." 
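;; Usage note (comment): the closure produced by DIA-make-avar-to-Data is
;; driven in two passes, mirroring DIA-extr-vatmpair further below:
;;   (define avar-to-Data (DIA-make-avar-to-Data 'pure)) ; hypothetical
;;   (avar-to-Data avr #t)  ; pass 1: record one occurrence of avr
;;   (avar-to-Data avr #f)  ; pass 2: fetch the memorized Data for avr
;; The KIND 'pure and the name avr are only placeholders here.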
"Formula is \n" (formula-to-string fmla))) (let((vapr (avar-table avr avar-Vapr-list))) (if vapr (list info vapr) (let((new-vapr (typair-to-vapair (DIA-formula-to-typair KIND fmla)))) (begin (set! avar-Vapr-list (cons (cons avr new-vapr) avar-Vapr-list)) (list info new-vapr))))))) (else (let((data (avar-table avr avar-Data-list))) (if data (cons info data) (let((new-data (DIA-Data KIND fmla))) (begin (if DEBUG-MAVD (nldisplay "Avar-to-Data:" "OK, assumption variable" (avar-to-string avr) "with " info "occurences will" "be added to DATA list." "Formula is \n" (formula-to-string fmla))) (set! avar-Data-list (cons (cons avr new-data) avar-Data-list)) (cons info new-data))))))))))))) ;; DIA-OPTone-preproc is a preprocessing phase to the ;; program-extraction in which also the Stab ;; and Stab-Log not containing strong ex are ;; replaced by their proofs, i.e. a Stab ;; elimination procedure is comprised in the ;; DIA-OPTone-preproc phase (define (DIA-OPTone-preproc prf) (DIA-OPTone-preproc-aux prf)) (define (DIA-OPTone-preproc-aux prf) (case (tag prf) ((proof-in-avar-form) prf) ((proof-in-aconst-form) (let*((aconst (proof-in-aconst-form-to-aconst prf)) (name (aconst-to-name aconst))) (cond ((string=? name "Stab") (let*((fmla (proof-to-formula prf)) (vars-and-final-kernel (allnc-form-to-vars-and-final-kernel fmla)) (vars (car vars-and-final-kernel)) (kernel (cadr vars-and-final-kernel)) (concl (unfold-formula (imp-form-to-conclusion kernel)))) (DIA-OPTone-preproc-aux (apply mk-proof-in-nc-intro-form (append vars (list (proof-of-stab-at concl)))) ))) ((string=? name "Stab-Log") (let*((fmla (proof-to-formula prf)) (vars-and-final-kernel (allnc-form-to-vars-and-final-kernel fmla)) (vars (car vars-and-final-kernel)) (kernel (cadr vars-and-final-kernel)) (concl (unfold-formula (imp-form-to-conclusion kernel)))) (DIA-OPTone-preproc-aux (apply mk-proof-in-nc-intro-form (append vars (list (proof-of-stab-log-at concl)))) ))) (else prf)))) ((proof-in-imp-elim-form) (let((op (proof-in-imp-elim-form-to-op prf)) (arg (proof-in-imp-elim-form-to-arg prf))) (make-proof-in-imp-elim-form (DIA-OPTone-preproc-aux op) (DIA-OPTone-preproc-aux arg)))) ((proof-in-imp-intro-form) (let((avar (proof-in-imp-intro-form-to-avar prf)) (kernel (proof-in-imp-intro-form-to-kernel prf))) (make-proof-in-imp-intro-form avar (DIA-OPTone-preproc-aux kernel)))) ((proof-in-and-intro-form) (let((left (proof-in-and-intro-form-to-left prf)) (right (proof-in-and-intro-form-to-right prf))) (make-proof-in-and-intro-form (DIA-OPTone-preproc-aux left) (DIA-OPTone-preproc-aux right)))) ((proof-in-and-elim-left-form) (let((kernel (proof-in-and-elim-left-form-to-kernel prf))) (make-proof-in-and-elim-left-form ;inserted M.S. (DIA-OPTone-preproc-aux kernel)))) ((proof-in-and-elim-right-form) (let((kernel (proof-in-and-elim-right-form-to-kernel prf))) (make-proof-in-and-elim-right-form ;inserted M.S. 
(DIA-OPTone-preproc-aux kernel)))) ((proof-in-tensor-intro-form) (let((left (proof-in-tensor-intro-form-to-left prf)) (right (proof-in-tensor-intro-form-to-right prf))) (make-proof-in-tensor-intro-form (DIA-OPTone-preproc-aux left) (DIA-OPTone-preproc-aux right)))) ((proof-in-tensor-elim-left-form) (let((kernel (proof-in-tensor-elim-left-form-to-kernel prf))) (make-proof-in-tensor-elim-left-form (DIA-OPTone-preproc-aux kernel)))) ((proof-in-tensor-elim-right-form) (let((kernel (proof-in-tensor-elim-right-form-to-kernel prf))) (make-proof-in-tensor-elim-right-form (DIA-OPTone-preproc-aux kernel)))) ((proof-in-all-intro-form) (let((var (proof-in-all-intro-form-to-var prf)) (kernel (DIA-OPTone-preproc-aux (proof-in-all-intro-form-to-kernel prf) ))) (if (proof-in-all-elim-form? kernel) (let*((tm (proof-in-all-elim-form-to-arg kernel)) (LEGDUM (if DEBUG (nldisplay "all-intro follows an all-elim " " var = " (var-to-string var) " tm = " (term-to-string tm))))) (if (term-in-var-form? tm) (if (equal? var (term-in-var-form-to-var tm)) (begin (if DEBUG (nldisplay "OK, optimized")) (proof-in-all-elim-form-to-op kernel)) (make-proof-in-all-intro-form var kernel)) (make-proof-in-all-intro-form var kernel))) (make-proof-in-all-intro-form var kernel)))) ((proof-in-all-elim-form) (let((op (DIA-OPTone-preproc-aux (proof-in-all-elim-form-to-op prf) )) (arg (proof-in-all-elim-form-to-arg prf))) (if (term-in-var-form? arg) (if (proof-in-all-intro-form? op) (let*((var (proof-in-all-intro-form-to-var op)) (LEGDUM (if DEBUG (nldisplay "all-elim follows an all-intro " " var = " (var-to-string var) " tm = " (term-to-string arg))))) (if (equal? var (term-in-var-form-to-var arg)) (begin (if DEBUG (nldisplay "OK, optimized")) (proof-in-all-intro-form-to-kernel op)) (make-proof-in-all-elim-form op arg))) (make-proof-in-all-elim-form op arg)) (make-proof-in-all-elim-form op arg)))) ((proof-in-allnc-intro-form) (let((var (proof-in-allnc-intro-form-to-var prf)) (kernel (DIA-OPTone-preproc-aux (proof-in-allnc-intro-form-to-kernel prf)))) (if (proof-in-allnc-elim-form? kernel) (let*((tm (proof-in-allnc-elim-form-to-arg kernel)) (LEGDUM (if DEBUG (nldisplay "allnc-intro follows an allnc-elim " " var = " (var-to-string var) " tm = " (term-to-string tm))))) (if (term-in-var-form? tm) (if (equal? var (term-in-var-form-to-var tm)) (begin (if DEBUG (nldisplay "OK, optimized")) (proof-in-allnc-elim-form-to-op kernel)) (make-proof-in-allnc-intro-form var kernel)) (make-proof-in-allnc-intro-form var kernel))) (make-proof-in-allnc-intro-form var kernel)))) ((proof-in-allnc-elim-form) (let((op (DIA-OPTone-preproc-aux (proof-in-allnc-elim-form-to-op prf))) (arg (proof-in-allnc-elim-form-to-arg prf))) (if (term-in-var-form? arg) (if (proof-in-allnc-intro-form? op) (let*((var (proof-in-allnc-intro-form-to-var op)) (LEGDUM (if DEBUG (nldisplay "allnc-elim follows an allnc-intro " " var = " (var-to-string var) " tm = " (term-to-string arg))))) (if (equal? 
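;; Reading aid (comment): the all/allnc clauses above and below perform a
;; small eta-style clean-up of the proof.  Schematically,
;;   (all-intro x (all-elim M x))  ~>  M
;;   (all-elim (all-intro x M) x)  ~>  M
;; whenever the eliminated argument is exactly the bound variable, which
;; is what the (equal? var (term-in-var-form-to-var ...)) tests check.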
var (term-in-var-form-to-var arg)) (begin (if DEBUG (nldisplay "OK, optimized")) (proof-in-allnc-intro-form-to-kernel op)) (make-proof-in-allnc-elim-form op arg))) (make-proof-in-allnc-elim-form op arg)) (make-proof-in-allnc-elim-form op arg)))) (else (myerror "DIA-OPTone-preproc-aux: proof tag expected" (tag prf))))) ;; DIA-Data-preproc is a pre-processing phase ;; where an evidence of the number of occurences ;; of each assumption in the proof is created (define (DIA-Data-preproc prf avar-to-Data) (case (tag prf) ((proof-in-avar-form) (begin (avar-to-Data (proof-in-avar-form-to-avar prf)) 1)) ((proof-in-imp-elim-form) (+ (DIA-Data-preproc (proof-in-imp-elim-form-to-op prf) avar-to-Data) (DIA-Data-preproc (proof-in-imp-elim-form-to-arg prf) avar-to-Data))) ((proof-in-imp-intro-form) (- (DIA-Data-preproc (proof-in-imp-intro-form-to-kernel prf) avar-to-Data) 1)) ((proof-in-and-intro-form) (+ (DIA-Data-preproc (proof-in-and-intro-form-to-left prf) avar-to-Data) (DIA-Data-preproc (proof-in-and-intro-form-to-right prf) avar-to-Data))) ((proof-in-and-elim-left-form) (DIA-Data-preproc (proof-in-and-elim-left-form-to-kernel prf) avar-to-Data)) ((proof-in-and-elim-right-form) (DIA-Data-preproc (proof-in-and-elim-right-form-to-kernel prf) avar-to-Data)) ((proof-in-tensor-intro-form) (+ (DIA-Data-preproc (proof-in-tensor-intro-form-to-left prf) avar-to-Data) (DIA-Data-preproc (proof-in-tensor-intro-form-to-right prf) avar-to-Data))) ((proof-in-tensor-elim-left-form) (DIA-Data-preproc (proof-in-tensor-elim-left-form-to-kernel prf) avar-to-Data)) ((proof-in-tensor-elim-right-form) (DIA-Data-preproc (proof-in-tensor-elim-right-form-to-kernel prf) avar-to-Data)) ((proof-in-all-intro-form) (DIA-Data-preproc (proof-in-all-intro-form-to-kernel prf) avar-to-Data)) ((proof-in-all-elim-form) (DIA-Data-preproc (proof-in-all-elim-form-to-op prf) avar-to-Data)) ((proof-in-allnc-intro-form) (DIA-Data-preproc (proof-in-allnc-intro-form-to-kernel prf) avar-to-Data)) ((proof-in-allnc-elim-form) (DIA-Data-preproc (proof-in-allnc-elim-form-to-op prf) avar-to-Data)) (else 0))) (define (DIA-display-NC-Prf prf) (begin (DIA-comment "NonComp-Conclusion: " 'CNL (formula-to-string (nf (proof-to-formula prf)))) (DIA-display-NC-Hyps? (proof-to-free-avars prf)))) (define (DIA-display-NC-Hyps? avar-list) (if (null? avar-list) (DIA-comment "END of NonComp-Assumptions") (begin (DIA-display-NC-Hyp (car avar-list)) (DIA-display-NC-Hyps? (cdr avar-list))))) (define (DIA-display-NC-Hyp avr) (DIA-comment (avar-to-string avr) " ==> " (formula-to-string (nf (avar-to-formula avr))))) (define (DIA-NonComp? KIND prf) (if (proof-in-avar-form? prf) #f ; (if (proof-in-aconst-form? prf) ; (case (aconst-to-kind (proof-in-aconst-form-to-aconst prf)) ; ((theorem) ; (begin ; (nldisplay "DIA-NonComp? KIND theorem" ; (formula-to-string (proof-to-formula prf))) ; (DIA-NonComp-Conc? KIND (proof-to-formula prf)))) ; ((axiom) #f) ; (else ; (begin ; (nldisplay "DIA-NonComp? KIND theorem" ; (formula-to-string (proof-to-formula prf))) ; (DIA-NonComp-Conc? KIND (proof-to-formula prf))))) (and (DIA-NonComp-Conc? KIND (proof-to-formula prf)) (DIA-NonComp-Hyps? KIND (proof-to-free-avars prf))))) (define (DIA-NonComp-Conc? KIND fmla) (let((vatup (vapair-left (car (DIA-Data KIND fmla))))) (if (NULL-vatuple? vatup) vatup #f))) (define (DIA-NonComp-Hyps? KIND avar-list) (if (null? avar-list) #t (and (DIA-NonComp-Hyp? KIND (car avar-list)) (DIA-NonComp-Hyps? KIND (cdr avar-list))))) (define (DIA-NonComp-Hyp? 
KIND avr) (let((vatup (vapair-right (car (DIA-Data KIND (avar-to-formula avr)))))) (if (NULL-vatuple? vatup) vatup #f))) (define (DIA-NonComp-proof-to-ux KIND prf free-avars) (begin (case (tag prf) ((proof-in-avar-form) (let*((avr (proof-in-avar-form-to-avar prf)) (fmla (avar-to-formula avr)) (tv (if (member-wrt avar=? avr free-avars) #t #f)) (rv (if tv (vapair-left (car (DIA-Data KIND fmla))) NULL_vatup))) (begin rv))) ((proof-in-aconst-form) NULL_vatup) ((proof-in-imp-intro-form) (DIA-NonComp-proof-to-ux KIND (proof-in-imp-intro-form-to-kernel prf) free-avars)) ((proof-in-imp-elim-form) (vatuple-append (DIA-NonComp-proof-to-ux KIND (proof-in-imp-elim-form-to-op prf) free-avars) (DIA-NonComp-proof-to-ux KIND (proof-in-imp-elim-form-to-arg prf) free-avars) "DIA-NonComp-proof-to-ux : imp-elim")) ((proof-in-and-intro-form) (vatuple-append (DIA-NonComp-proof-to-ux KIND (proof-in-and-intro-form-to-left prf) free-avars) (DIA-NonComp-proof-to-ux KIND (proof-in-and-intro-form-to-right prf) free-avars) "DIA-NonComp-proof-to-ux : and-intro")) ((proof-in-and-elim-left-form) (DIA-NonComp-proof-to-ux KIND (proof-in-and-elim-left-form-to-kernel prf) free-avars)) ((proof-in-and-elim-right-form) (DIA-NonComp-proof-to-ux KIND (proof-in-and-elim-right-form-to-kernel prf) free-avars)) ((proof-in-tensor-intro-form) (vatuple-append (DIA-NonComp-proof-to-ux KIND (proof-in-tensor-intro-form-to-left prf) free-avars) (DIA-NonComp-proof-to-ux KIND (proof-in-tensor-intro-form-to-right prf) free-avars) "DIA-NonComp-proof-to-ux : tensor-intro")) ((proof-in-tensor-elim-left-form) (DIA-NonComp-proof-to-ux KIND (proof-in-tensor-elim-left-form-to-kernel prf) free-avars)) ((proof-in-tensor-elim-right-form) (DIA-NonComp-proof-to-ux KIND (proof-in-tensor-elim-right-form-to-kernel prf) free-avars)) ((proof-in-all-intro-form) (DIA-NonComp-proof-to-ux KIND (proof-in-all-intro-form-to-kernel prf) free-avars)) ((proof-in-all-elim-form) (DIA-NonComp-proof-to-ux KIND (proof-in-all-elim-form-to-op prf) free-avars)) ((proof-in-allnc-intro-form) (DIA-NonComp-proof-to-ux KIND (proof-in-allnc-intro-form-to-kernel prf) free-avars)) ((proof-in-allnc-elim-form) (DIA-NonComp-proof-to-ux KIND (proof-in-allnc-elim-form-to-op prf) free-avars)) (else (myerror "DIA-NonComp-proof-to-ux: proof expected" prf))))) (define (DIA-NonComp-tmtupalist KIND avar-list) (if (null? avar-list) NULL_tmtupalst (let((avr (car avar-list))) (cons (cons avr (vatuple-to-tmtuple (DIA-NonComp-Hyp? KIND avr))) (DIA-NonComp-tmtupalist KIND (cdr avar-list)))))) (define (DIA-NonComp-vatmp KIND prf) (let*((conc (proof-to-formula prf)) (hyps (proof-to-free-avars prf)) (y (vapair-right (car (DIA-Data KIND conc)))) (ux (DIA-NonComp-proof-to-ux KIND prf hyps)) (vapr (make-vapair y ux)) (tmpr (make-tmpair (vatuple-to-tmtuple (DIA-NonComp-Conc? KIND conc)) (DIA-NonComp-tmtupalist KIND hyps))) (rv (make-vatmpair vapr tmpr))) (begin rv))) ;; For the two proofs built by the DIA-extraction system ;; during the treatment of Induction also a dedicated ;; extraction procedure is more useful (define (Ind-extracted-vatmpair KIND prf) (let*((old-debug DEBUG) (LEGDUM (set! DEBUG DEBUG-IND-AX)) (avar-to-Data (DIA-make-avar-to-Data KIND)) (avar-to-Data-ONE (lambda (x) (avar-to-Data x #t))) (LEGDUM (DIA-Data-preproc prf avar-to-Data-ONE)) (avar-to-Data-TWO (lambda (x) (avar-to-Data x #f))) (vatmpr (DIA-extr-vatmpair-aux KIND prf avar-to-Data-TWO)) (LEGDUM (set! 
DEBUG old-debug))) vatmpr)) (define (DIA-extract KIND prf) (case KIND ((pure light monot) (DIA-reset-counters) (let ((rv (DIA-extr-vatmpair KIND prf))) (DIA-display-counters) rv)) (else (myerror "DIA-extract: unknown KIND" KIND)))) (define (DIA-extr-vatmpair KIND prf) (case KIND ((pure light monot) (if (DIA-NonComp? KIND prf) (begin (if EXTRACT-VERBOSE (DIA-comment-forced "DIA-extr-vatmpair:" "Non-Computational Sub-Proof detected")) (if DEBUG (DIA-display-NC-Prf prf)) (DIA-NonComp-vatmp KIND prf)) (let*((str-fmla (formula-to-string (nf (proof-to-formula prf)))) (LEGDUM (if EXTRACT-VERBOSE (DIA-comment-forced "DIA-extr-vatmpair: NEW EXTRACTION STARTS"))) (LEGDUM (if DEBUG (DIA-comment "Extraction from a proof of formula" 'CNL str-fmla))) (pre-prf (DIA-OPTone-preproc prf)) (LEGDUM (if EXTRACT-VERBOSE (DIA-comment-forced "OK pre-processing phase ONE ended"))) (nrm-prf (if NORMALIZE-PROOF (let*((norm-prf (DIA-time (np pre-prf)))) (begin (if EXTRACT-VERBOSE (DIA-comment-forced "OK normalization of proof ENDED")) norm-prf)) pre-prf)) (avar-to-Data (DIA-make-avar-to-Data KIND)) (avar-to-Data-ONE (lambda (x) (avar-to-Data x #t))) (LEGDUM (DIA-Data-preproc nrm-prf avar-to-Data-ONE)) (avar-to-Data-TWO (lambda (x) (avar-to-Data x #f))) (vatmpr (DIA-time (DIA-extr-vatmpair-aux KIND nrm-prf avar-to-Data-TWO))) (LEGDUM (begin (if EXTRACT-VERBOSE (DIA-comment-forced "DIA-extr-vatmpair: EXTRACTION ENDED")) (if DEBUG (DIA-comment "Extraction from a proof of formula" 'CNL str-fmla)))) (fmla (proof-to-formula nrm-prf)) (tmpr (vatmpair-to-tmpair vatmpr)) (tmtup (tmpair-to-tuple tmpr)) ;;; (extra-free-vars '()) (extra-free-vars (set-minus (tmtuple-to-free tmtup) (formula-to-free fmla))) (LEGDUM (if (not (null? extra-free-vars)) (nldisplay "WARNING: in DIA-extr-vatmpair, " "after the return from DIA-extr-vatmpair-aux," "extracted terms have not NULL extra-free-vars !!!"))) (new-vatmpr (if (null? extra-free-vars) vatmpr (let*((alst (DIA-make-ZERO-alist extra-free-vars)) (new-tmtup (DIA-tmtuple-non-simult-subst tmtup alst))) (begin (if EXTRACT-VERBOSE (DIA-comment-forced "Some extra free vars" "of the extracted terms" "are substituted with ZEROs" 'CNL (DIA-non-simult-alist-to-string alst) 'CNL "The list of uncanceled assumptions is" 'CNL (tmtuplealist-to-string (tmpair-to-alist tmpr))) (make-vatmpair (vatmpair-to-vapair vatmpr) (make-tmpair new-tmtup (tmpair-to-alist tmpr)))))))) (norm-vatmpr (nbe-normalize-vatmpair-of THEOREM-NORMALIZE new-vatmpr))) norm-vatmpr))) (else (myerror "DIA-extr-vatmpair:" "unknown KIND" KIND)))) (define (DIA-extr-vatmpair-aux KIND prf avar-to-Data) (if (DIA-NonComp? 
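;; Hedged entry-point sketch: the machinery above is normally driven
;; through DIA-extract with one of the three supported KINDs, e.g.
;;   (DIA-extract 'pure (current-proof))
;;   (DIA-extract 'light (current-proof))
;;   (DIA-extract 'monot (current-proof))
;; where current-proof is assumed to be the standard Minlog accessor for
;; the proof under construction; any other complete proof object should
;; work as well.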
KIND prf) (begin (if EXTRACT-VERBOSE (DIA-comment-forced "DIA-extr-vatmpair-aux:" "Non-Computational Sub-Proof detected")) (if DEBUG (DIA-display-NC-Prf prf)) (DIA-NonComp-vatmp KIND prf)) (case (tag prf) ((proof-in-avar-form) (let*((avar (proof-in-avar-form-to-avar prf)) (data (avar-to-Data avar)) (LEGDUM (if DEBUG (begin (nldisplay "PROOF-IN-AVAR-FORM:" (avar-to-string avar) "occuring " (car data) "times in the proof.") (DIA-comment "Formula is " (formula-to-string (nf (avar-to-formula avar))))))) (vapr (cadr data)) (x (vapair-left vapr)) (y (vapair-right vapr)) (xy (vatuple-append x y "proof-in-avar-form")) (tm-x (vatuple-to-tmtuple x)) (tm-y (vatuple-to-tmtuple y)) (T-lst (list (cons avar (make-tmtuple-in-abst-form xy tm-y)))) (T (make-tmtuple-in-abst-form x tm-x)) (rv (make-vatmpair (make-vapair y x) (make-tmpair T T-lst))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-avar-form"))) rv)) ((proof-in-aconst-form) (let*((aconst (proof-in-aconst-form-to-aconst prf)) (name (aconst-to-name aconst)) (LEGDUM (if DEBUG (nldisplay "PROOF-IN-aconst-FORM:" name )))) (case (aconst-to-kind aconst) ((axiom) (cond ((string=? "Ind" name) ;;; (DIA-IndAx-extract KIND aconst) ;;; MDH -- 071117 uncomment this ;;; after having loaded the code from IndAxSrc.scm if you really want ... (myerror "DIA-extr-vatmpair: " "we strongly discourage the treatement" " of the Induction Axiom, since it should only be used as part" " of the Induction Rule" "We therefore moved the specific code" " into a separate procedure DIA-IndAx-extract in IndAxSrc.scm" " This is meant to be used for a few experimental IndAx samples" " where the induction formula has low logical complexity," "basically quantifier-free and purely-existential" "This message is normally a sign of error in the proof at input." "Otherwise, if you really want, you can re-include the code" "as indicated at the right place in extraction module newfiets.scm")) ((string=? "Cases" name) (myerror "DIA-extr-vatmpair: " "Cases not implemented")) ((string=? "Ex-Intro" name) (let*((ex-fmla (aconst-to-repro-formula1 aconst)) (LEGDUM (if DEBUG (nldisplay "PROOF-IN-Ex-Intro-FORM" (formula-to-string ex-fmla)))) (z (var-to-vatuple (ex-form-to-var ex-fmla))) (kernel (ex-form-to-kernel ex-fmla)) (typr (DIA-formula-to-typair KIND kernel)) (x (tytuple-to-vatuple (typair-left typr))) (y (tytuple-to-vatuple (typair-right typr))) (zx (vatuple-append z x "Ex-Intro 1")) (zxy (vatuple-append zx y "Ex-Intro 2")) (tm-Y (make-tmtuple-in-abst-form zxy (vatuple-to-tmtuple y))) (tm-ZU (make-tmtuple-in-abst-form zx (vatuple-to-tmtuple (tuple-append z x)))) (tm-YZU (tmtuple-append tm-Y tm-ZU "Ex-Intro")) (rv (make-vatmpair (make-vapair zxy NULL_vatup) (make-tmpair tm-YZU NULL_tmtupalst))) (LEGDUM (if DEBUG (nldisplay rv "FINISHED Ex-Intro")))) rv)) ((string=? 
"Ex-Elim" name) (let*((ex-fmla (aconst-to-repro-formula1 aconst)) (concl (aconst-to-repro-formula2 aconst)) (LEGDUM (if DEBUG (nldisplay "PROOF-IN-Ex-Elim-FORM" "ex-form = " (formula-to-string ex-fmla) "concl" (formula-to-string concl)))) (z (var-to-vatuple (ex-form-to-var ex-fmla))) (kernel (ex-form-to-kernel ex-fmla)) (kernel-typr (DIA-formula-to-typair KIND kernel)) (x (tytuple-to-vatuple (typair-left kernel-typr))) (yp (tytuple-to-vatuple (typair-right kernel-typr))) (concl-typr (DIA-formula-to-typair KIND concl)) (up (tytuple-to-vatuple (typair-left concl-typr))) (v (tytuple-to-vatuple (typair-right concl-typr))) (zx (vatuple-append z x "Ex-Elim 1")) (zxv (vatuple-append zx v "Ex-Elim 2")) (typ_y (make-tytuple-arrow (vatuple-to-tytuple zxv) (vatuple-to-tytuple yp))) (y (tytuple-to-vatuple typ_y)) (typ_u (make-tytuple-arrow (vatuple-to-tytuple zx) (vatuple-to-tytuple up))) (u (tytuple-to-vatuple typ_u)) (zxy (vatuple-append zx y "Ex-Elim 3")) (zxyu (vatuple-append zxy u "Ex-Elim 4")) (zxyuv (vatuple-append zxyu v "Ex-Elim 5")) (tm-yp (make-tmtuple-in-app-form (vatuple-to-tmtuple y) (vatuple-to-tmtuple zxv))) (tm-ypz (tmtuple-append tm-yp (vatuple-to-tmtuple z) "Ex-Elim 1")) (tm-xv (tmtuple-append (vatuple-to-tmtuple x) (vatuple-to-tmtuple v) "Ex-Elim 2")) (tm-YZXV (make-tmtuple-in-abst-form zxyuv (tmtuple-append tm-ypz tm-xv "Ex-Elim 3"))) (tm-U (make-tmtuple-in-abst-form zxyu (make-tmtuple-in-app-form (vatuple-to-tmtuple u) (vatuple-to-tmtuple zx)))) (tm-YZXVU (tmtuple-append tm-YZXV tm-U "Ex-Elim 4")) (rv (make-vatmpair (make-vapair zxyuv NULL_vatup) (make-tmpair tm-YZXVU NULL_tmtupalst))) (LEGDUM (if DEBUG (nldisplay "FINISHED Ex-Elim")))) rv)) ((string=? "Exnc-Elim" name) (myerror "DIA-extr-vatmpair: " "Exnc-Elim not implemented")) ((string=? "Exnc-Intro" name) (myerror "DIA-extr-vatmpair: " "Exnc-Intro not implemented")) ((or (string=? "Intro" name) (string=? "Elim" name)) (myerror "DIA-extr-vatmpair: " "Inductive Definitions not implemented")) ((string=? "Eq-Compat" name) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-Eq-Compat-FORM"))) (fmla (aconst-to-inst-formula aconst)) (kernel (imp-form-to-conclusion (imp-form-to-conclusion fmla))) (typr (DIA-formula-to-typair KIND kernel)) (u (tytuple-to-vatuple (typair-left typr))) (v (tytuple-to-vatuple (typair-right typr))) (uv (vatuple-append u v "Eq-Compat")) (Tv (make-tmtuple-in-abst-form uv (vatuple-to-tmtuple v))) (Tu (make-tmtuple-in-abst-form u (vatuple-to-tmtuple u))) (Tvu (tmtuple-append Tv Tu "Eq-Compat")) (rv (make-vatmpair (make-vapair uv NULL_vatup) (make-tmpair Tvu NULL_tmtupalst))) (LEGDUM (if DEBUG (nldisplay "FINISHED Eq-Compat")))) rv)) ((string=? "Truth-Axiom" name) (begin (if DEBUG (nldisplay "TRUTH-AXIOM")) (make-vatmpair (make-vapair NULL_vatup NULL_vatup) (make-tmpair NULL_tmtup NULL_tmtupalst)))) (else (myerror "DIA-extr-vatmpair-aux : " "axiom expected" name)))) ((theorem) (let*((LEGDUM (if EXTRACT-VERBOSE (DIA-comment-forced "BEGINS treatment of THEOREM " (aconst-to-string aconst)))) (rv (DIA-extr-vatmpair KIND (theorem-aconst-to-inst-proof aconst))) (LEGDUM (if EXTRACT-VERBOSE (DIA-comment-forced "ENDED treatment of THEOREM " (aconst-to-string aconst))))) rv)) ((global-assumption) (cond ((string=? "Stab-Log" name) (myerror "DIA-extr-vatmpair: " "Stab and Stab-Log not allowed here;" "you must eliminate them either via reduce-stab," "if the kernel contains no strong Exists or otherwise" "you must use a Negative Translation" "from classical to intuitionistic proofs")) ((string=? 
"Stab" name) (myerror "DIA-extr-vatmpair: " "Stab and Stab-Log not allowed here;" "you must eliminate them either via reduce-stab," "if the kernel contains no strong Exists or otherwise" "you must use a Negative Translation" "from classical to intuitionistic proofs")) ((string=? "Efq-Log" name) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-Efq-Log-FORM"))) (fmla (aconst-to-inst-formula aconst)) (typr (DIA-formula-to-typair KIND fmla)) (Tx (tytuple-to-ZERO (typair-left typr))) (y (tytuple-to-vatuple (typair-right typr))) (rv (make-vatmpair (make-vapair y NULL_vatup) (make-tmpair Tx NULL_tmtupalst))) (LEGDUM (if DEBUG (nldisplay "FINISHED Efq-Log")))) rv)) ((string=? "Efq" name) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-Efq-FORM"))) (fmla (aconst-to-inst-formula aconst)) (typr (DIA-formula-to-typair KIND fmla)) (Tx (tytuple-to-ZERO (typair-left typr))) (y (tytuple-to-vatuple (typair-right typr))) (rv (make-vatmpair (make-vapair y NULL_vatup) (make-tmpair Tx NULL_tmtupalst))) (LEGDUM (if DEBUG (nldisplay "FINISHED Efq")))) rv)) ((or (and (<= (string-length "Eq-Compat-Rev") (string-length name)) (string=? (substring name 0 (string-length "Eq-Compat-Rev")) "Eq-Compat-Rev")) (and (<= (string-length "Compat-Rev") (string-length name)) (string=? (substring name 0 (string-length "Compat-Rev")) "Compat-Rev"))) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-(Eq-)Compat-Rev-FORM"))) (fmla (aconst-to-inst-formula aconst)) (kernel (imp-form-to-conclusion (imp-form-to-conclusion fmla))) (typr (DIA-formula-to-typair KIND kernel)) (u (tytuple-to-vatuple (typair-left typr))) (v (tytuple-to-vatuple (typair-right typr))) (uv (vatuple-append u v "(Eq-)Compat-Rev")) (Tv (make-tmtuple-in-abst-form uv (vatuple-to-tmtuple v))) (Tu (make-tmtuple-in-abst-form u (vatuple-to-tmtuple u))) (Tvu (tmtuple-append Tv Tu "(Eq-)Compat-Rev")) (rv (make-vatmpair (make-vapair uv NULL_vatup) (make-tmpair Tvu NULL_tmtupalst))) (LEGDUM (if DEBUG (nldisplay "FINISHED (Eq-)Compat-Rev")))) rv)) (else (let*((rv (DIA-uga-to-vatmpair KIND aconst)) ; (LEGDUM ; (DIA-tyva-check KIND prf (proof-to-formula prf) ; rv "UserGlobalAssumption")) ) rv)))) (else (myerror "DIA-extr-vatmpair: " "unknown kind of aconst" (aconst-to-kind aconst)))))) ((proof-in-imp-intro-form) (let*((avar (proof-in-imp-intro-form-to-avar prf)) (LEGDUM (if DEBUG (nldisplay "PROOF-IN-IMP_INTRO-FORM of avar" (avar-to-string avar)))) (data (avar-to-Data avar)) (av-occur (car data)) (LEGDUM (if DEBUG (nldisplay "Imp-Intro: the avar " (avar-to-string avar) "occurs " av-occur " times."))) (kernel (proof-in-imp-intro-form-to-kernel prf)) (vatmpr (DIA-extr-vatmpair-aux KIND kernel avar-to-Data)) (LEGDUM (if DEBUG (nldisplay "Imp-Intro: OK, kernel processed."))) (ke-vapr (vatmpair-to-vapair vatmpr)) (y (vapair-left ke-vapr)) (xpx (vapair-right ke-vapr)) (ke-tmpr (vatmpair-to-tmpair vatmpr)) (T (tmpair-to-tuple ke-tmpr)) (ke-alst (tmpair-to-alist ke-tmpr))) (case av-occur ((0) (if EXTRACT-VERBOSE (DIA-comment-forced "Imp-Intro with DUMMY assumption " (avar-to-string avar) " detected.")) (if DEBUG (DIA-comment "INTRO formula is" 'CNL (formula-to-string (nf (avar-to-formula avar))) 'CNL "and CONCLUSION formula is" 'CNL (formula-to-string (nf (proof-to-formula kernel))))) (let*((typr (DIA-formula-to-typair KIND (avar-to-formula avar))) (ex-tytup (typair-left typr)) (all-tytup (typair-right typr)) (z (tytuple-to-vatuple ex-tytup)) (zero-tmtup (tytuple-to-ZERO all-tytup)) (tmtup-xpx (vatuple-to-tmtuple xpx)) (xz (vatuple-append xpx z "Imp-Intro dummy assum 1")) (xzy (vatuple-append xz y 
"Imp-Intro dummy assum 2")) (zy (vatuple-append z y "Imp-Intro dummy assum 3")) (St (make-tmtuple-in-abst-form xzy zero-tmtup)) (Ss (make-tmtuple-in-abst-form xz (make-tmtuple-in-app-form T tmtup-xpx))) (S (tmtuple-append St Ss "Imp-Intro dummy assum 4")) (uS (make-tmtuplealist-in-abst-form xz (make-tmtuplealist-in-app-form ke-alst tmtup-xpx))) (rv (make-vatmpair (make-vapair zy xpx) (make-tmpair S uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-imp-intro-form"))) rv)) (else (let*((av-vapr (cadr data)) (z (vapair-left av-vapr)) (vatup-tmtupalst-tmtuplst (Imp-Intro-split avar ke-alst avar-to-Data)) (x (caar vatup-tmtupalst-tmtuplst)) (new-alst (cdar vatup-tmtupalst-tmtuplst)) (tmtuplst (cdr vatup-tmtupalst-tmtuplst)) (LEGDUM (if CHECK (let((l1 (length tmtuplst)) (l2 (length new-alst)) (l3 (length ke-alst)) (l4 (vatuple-len xpx)) (l5 (vatuple-len x)) (l6 (vatuple-len z))) (begin (if (not (= l1 av-occur)) (myerror "DIA-extr-vatmpair: " "internal error 1 at" "imp-intro of assumption " (avar-to-string avar) ": l1 is != av-occur " "l1=" l1 "av-occur=" av-occur ".\n Assumption formula is: " (formula-to-string (avar-to-formula avar)) ".\n Kernel formula is: " (formula-to-string (proof-to-formula kernel)))) (if (not (= l3 (+ l1 l2))) (myerror "DIA-extr-vatmpair: " "internal error 2 at" "imp-intro of assumption " (avar-to-string avar) ": l3 is != (+ l1 l2) " ".\n Assumption formula is: " (formula-to-string (avar-to-formula avar)) ".\n Kernel formula is: " (formula-to-string (proof-to-formula kernel)))) (if (not (= l4 (+ l5 (* l1 l6)))) (myerror "DIA-extr-vatmpair: " "internal error 3 at" "imp-intro of assumption " (avar-to-string avar) ": l4 is != (+ l5 (* l1 l6)) " ".\n Assumption formula is: " (formula-to-string (avar-to-formula avar)) ".\n Kernel formula is: " (formula-to-string (proof-to-formula kernel)))))))) (xz (vatuple-append x z "proof-in-imp-intro-form 1")) (zy (vatuple-append z y "proof-in-imp-intro-form 2")) (xzy (vatuple-append x zy "proof-in-imp-intro-form 3")) (xpxy (vatuple-to-tmtuple (vatuple-append xpx y "proof-in-imp-intro-form 4"))) (tmtup (if (null? (cdr tmtuplst)) (begin (if CONTR-VERBOSE (DIA-comment-forced "Imp-Intro WITHOUT Contraction" (avar-to-string avar) " detected.")) (if DEBUG (DIA-comment "Intro formula is" 'CNL (formula-to-string (nf (avar-to-formula avar))) 'CNL "and conclusion formula is" 'CNL (formula-to-string (nf (proof-to-formula kernel))))) (make-tmtuple-in-app-form (car tmtuplst) xpxy)) (begin (if (NULL-tmtuple? (car tmtuplst)) (begin (set! CIRC-COUNT (+ CIRC-COUNT 1)) (if CONTR-VERBOSE (DIA-comment-forced "Imp-Intro WITH Logical Contraction" (avar-to-string avar) " detected.")) (if DEBUG (DIA-comment "Intro formula is" 'CNL (formula-to-string (nf (avar-to-formula avar))) 'CNL "which occurs " av-occur " times. " "Conclusion formula is" 'CNL (formula-to-string (nf (proof-to-formula kernel))))) (make-tmtuple-in-app-form (car tmtuplst) xpxy)) (begin (set! CRLC-COUNT (+ CRLC-COUNT 1)) (if CONTR-VERBOSE (DIA-comment-forced "Imp-Intro WITH Computational Contraction" (avar-to-string avar) " detected.")) (if DEBUG (DIA-comment "Intro formula is" 'CNL (formula-to-string (nf (avar-to-formula avar))) 'CNL "which occurs " av-occur " times. " "Conclusion formula is" 'CNL (formula-to-string (nf (proof-to-formula kernel))))) (case KIND ((light pure) (if CONTR-VERBOSE (DIA-comment-forced "Application of Imp-Intro-CondN ... 
" 'CNL "The associated B-term is" (term-to-string (nt (caddr data))))) (Imp-Intro-CondN (make-tmtuplist-in-app-form tmtuplst xpxy) (vapair-right av-vapr) (caddr data))) ((monot) (if CONTR-VERBOSE (DIA-comment-forced "Application of Imp-Intro-MonN ... " 'CNL "The associated B-term is" (term-to-string (nt (caddr data))))) (Imp-Intro-MonN (make-tmtuplist-in-app-form tmtuplst xpxy))) (else (myerror "DIA-extr-vatmpair-aux: Relevant Contraction:" "unknown KIND " KIND)))))))) (St (make-tmtuple-in-abst-form xzy tmtup)) (Ss (make-tmtuple-in-abst-form xz (make-tmtuple-in-app-form T (vatuple-to-tmtuple xpx)))) (S (tmtuple-append St Ss "proof-in-imp-intro-form")) (uS (make-tmtuplealist-in-abst-form xzy (make-tmtuplealist-in-app-form new-alst xpxy))) (rv (make-vatmpair (make-vapair zy x) (make-tmpair S uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-imp-intro-form"))) (nbe-normalize-vatmpair-of IMP-INTRO-NORMALIZE rv)))))) ((proof-in-imp-elim-form) (let((all-elim-tm (DIA-Ind-Rule? prf))) (if all-elim-tm (let*((LEGDUM (if (or DEBUG DEBUG-IND-RL) (nldisplay "PROOF-IN-IndRule-??? Begin"))) (step-prf (proof-in-imp-elim-form-to-arg prf)) (step-avars (proof-to-free-avars step-prf)) (LEGDUM (if DEBUG-IND-RL (nldisplay "Ind-Rule: Step has" (length step-avars) "assumptions" SNL (DIA-avars-to-string step-avars)))) (base-prf (proof-in-imp-elim-form-to-arg (proof-in-imp-elim-form-to-op prf))) (base-avars (proof-to-free-avars base-prf)) (LEGDUM (if DEBUG-IND-RL (nldisplay "Ind-Rule: Base has" (length base-avars) "assumptions" SNL (DIA-avars-to-string base-avars))))) (if (and (null? base-avars) (null? step-avars)) (let*((LEGDUM (begin (set! IndRlZeCOUNTER (+ IndRlZeCOUNTER 1)) (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-Zero-FORM" IndRlZeCOUNTER "BEGIN")))) (LocalIndRlZeCOUNTER IndRlZeCOUNTER) (step-vatmpr (DIA-extr-vatmpair KIND step-prf)) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-Zero-FORM" LocalIndRlZeCOUNTER "Step Mined"))) (step-tmpr (vatmpair-to-tmpair step-vatmpr)) (step-alist (tmpair-to-alist step-tmpr)) (LEGDUM (if (not-null? step-alist) (myerror "Ind-Rule IR0: Step has" (length step-alist) "assumptions"))) (step-tmtup (tmtuple-right (tmpair-to-tuple step-tmpr))) (base-vatmpr (DIA-extr-vatmpair KIND base-prf)) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-Zero-FORM" LocalIndRlZeCOUNTER "Base Mined"))) (base-vapr (vatmpair-to-vapair base-vatmpr)) (y (vapair-left base-vapr)) (base-tmpr (vatmpair-to-tmpair base-vatmpr)) (base-alist (tmpair-to-alist base-tmpr)) (LEGDUM (if (not-null? 
base-alist) (myerror "Ind-Rule IR0: Base has" (length base-alist) "assumptions"))) (base-tmtup (tmpair-to-tuple base-tmpr)) (star-tytup (tmtuple-to-tytuple base-tmtup)) (base-tm (DIA-tmtuple-to-star base-tmtup)) (star_typ (term-to-type base-tm)) (fld-ys (type-to-new-var star_typ)) (tm-fld-ys (make-term-in-var-form fld-ys)) (unfld-ys (DIA-star-to-tmtuple tm-fld-ys star-tytup)) (typ_z (py "nat")) (va-z (type-to-new-var typ_z)) (tm-z (make-term-in-var-form va-z)) (z (var-to-vatuple va-z)) (tmtup-z (vatuple-to-tmtuple z)) (tmtup-zys (tmtuple-append tmtup-z unfld-ys "Ind-Rule")) (tmtup-step (make-tmtuple-in-app-form step-tmtup tmtup-zys)) (tm-step (make-term-in-abst-form va-z (make-term-in-abst-form fld-ys (DIA-tmtuple-to-star tmtup-step)))) (Rec (case KIND ((light pure) (DIA-type-to-rec-term typ_z star_typ)) ((monot) (type-to-mon-rec-tm typ_z star_typ)) (else (myerror "DIA-extr-vatmpair: " "Ind - Rec ZERO:" "unknown KIND" KIND)))) (tm-T (make-term-in-app-form (make-term-in-app-form Rec base-tm) tm-step)) (tmtup-T (DIA-star-to-tmtuple (make-term-in-app-form tm-T tm-z) star-tytup)) (real-T (make-tmtuple-in-app-form (make-tmtuple-in-abst-form z tmtup-T) (term-to-tmtuple all-elim-tm))) ;;; (zy (vatuple-append z y "Ind-Rule")) (rv (make-vatmpair (make-vapair y NULL_vatup) (make-tmpair real-T NULL_tmtupalst))) (LEGDUM (nldisplay "PROOF-IN-IndRule-Zero-FORM" LocalIndRlZeCOUNTER "END")) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "Ind-Rule IR0"))) rv) (let*((LEGDUM (begin (set! IndRuleCOUNTER (+ IndRuleCOUNTER 1)) (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" IndRuleCOUNTER "BEGIN")))) (LocalIndRuleCOUNTER IndRuleCOUNTER) (step-fmla (proof-to-formula step-prf)) (step-var (all-form-to-var step-fmla)) (step-ker (all-form-to-kernel step-fmla)) (Az (imp-form-to-premise step-ker)) (LEGDUM (if DEBUG-IND-RL (nldisplay "Ind-Rule IR: KERNEL" "formula is:" SNL(formula-to-string Az)))) (CimpAz-fmla (DIA-IR-mk-imp-formula base-avars (DIA-IR-mk-imp-formula step-avars Az))) (LEGDUM (if DEBUG-IND-RL (DIA-comment-forced "Ind-Rule IR: CimpAz is" (formula-to-string (nf CimpAz-fmla))))) (CimpAz-avr (formula-to-new-avar CimpAz-fmla)) (CimpAz-prf (make-proof-in-avar-form CimpAz-avr)) (Az-prf (DIA-IR-mk-elim-proof (DIA-IR-mk-elim-proof CimpAz-prf base-avars) step-avars)) (tm-step-var (make-term-in-var-form step-var)) (ASz-prf (make-proof-in-imp-elim-form (make-proof-in-all-elim-form step-prf tm-step-var) Az-prf)) (CimpASz-prf (DIA-IR-mk-intro-proof base-avars (DIA-IR-mk-intro-proof step-avars ASz-prf))) (new-step-prf (make-proof-in-all-intro-form step-var (make-proof-in-imp-intro-form CimpAz-avr CimpASz-prf))) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" LocalIndRuleCOUNTER "new-step-prf BUILT"))) (step-vatmpr (DIA-extr-vatmpair KIND new-step-prf)) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" LocalIndRuleCOUNTER "new-step-prf MINED"))) (step-tmpr (vatmpair-to-tmpair step-vatmpr)) (step-alist (tmpair-to-alist step-tmpr)) (LEGDUM (if PARANOIA (if (not-null? 
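;; Reading aid (comment): in both induction branches the realizer is
;; assembled from recursion over nat; schematically, with
;;   T = (Rec base-tm) tm-step
;; one expects T 0 ~ base-tm and T (n+1) ~ tm-step n (T n), where Rec is
;; chosen by DIA-type-to-rec-term (pure/light) or type-to-mon-rec-tm
;; (monot).  This only paraphrases the construction of tm-T in the
;; surrounding code and adds no new behaviour.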
step-alist) (myerror "Ind-Rule IR: assumptions" "for new-step-prf")))) (step-tmtup (tmtuple-right (tmpair-to-tuple step-tmpr))) (new-base-prf (DIA-IR-mk-intro-proof base-avars (DIA-IR-mk-intro-proof step-avars base-prf))) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" LocalIndRuleCOUNTER "new-base-prf BUILT"))) (base-vatmpr (DIA-extr-vatmpair KIND new-base-prf)) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" LocalIndRuleCOUNTER "new-base-prf MINED"))) (base-vapr (vatmpair-to-vapair base-vatmpr)) (y (vapair-left base-vapr)) (base-tmpr (vatmpair-to-tmpair base-vatmpr)) (base-alist (tmpair-to-alist base-tmpr)) (LEGDUM (if PARANOIA (if (not-null? base-alist) (myerror "Ind-Rule IR: Assumptions" "for new-base-prf")))) (base-tmtup (tmpair-to-tuple base-tmpr)) (star-tytup (tmtuple-to-tytuple base-tmtup)) (base-tm (DIA-tmtuple-to-star base-tmtup)) (star_typ (term-to-type base-tm)) (fld-ys (type-to-new-var star_typ)) (tm-fld-ys (make-term-in-var-form fld-ys)) (unfld-ys (DIA-star-to-tmtuple tm-fld-ys star-tytup)) (typ_z (py "nat")) (va-z (type-to-new-var typ_z)) (tm-z (make-term-in-var-form va-z)) (z (var-to-vatuple va-z)) (tmtup-z (vatuple-to-tmtuple z)) (tmtup-zys (tmtuple-append tmtup-z unfld-ys "Ind-Rule")) (tmtup-step (make-tmtuple-in-app-form step-tmtup tmtup-zys)) (tm-step (make-term-in-abst-form va-z (make-term-in-abst-form fld-ys (DIA-tmtuple-to-star tmtup-step)))) (Rec (case KIND ((light pure) (DIA-type-to-rec-term typ_z star_typ)) ((monot) (type-to-mon-rec-tm typ_z star_typ)) (else (myerror "DIA-extr-vatmpair: " "Ind - Rec:" "unknown KIND" KIND)))) (tm-T (make-term-in-app-form (make-term-in-app-form Rec base-tm) tm-step)) (tmtup-T (DIA-star-to-tmtuple (make-term-in-app-form tm-T tm-z) star-tytup)) (real-T (make-tmtuple-in-app-form (make-tmtuple-in-abst-form z tmtup-T) (term-to-tmtuple all-elim-tm))) ;;; (zy (vatuple-append z y "Ind-Rule")) (rv (make-vatmpair (make-vapair y NULL_vatup) (make-tmpair real-T NULL_tmtupalst))) (IR-fmla (DIA-IR-mk-imp-formula base-avars (DIA-IR-mk-imp-formula step-avars (proof-to-formula prf)))) (IR-acnst (make-aconst "Ind-Rule" 'global-assumption IR-fmla empty-subst)) (LEGDUM (set! DIA-UGA-ALIST (cons (cons IR-acnst rv) DIA-UGA-ALIST))) (IR-acnst-prf (list 'proof-in-aconst-form IR-fmla IR-acnst)) (IR-prf IR-acnst-prf) (fin-prf (DIA-IR-mk-elim-proof (DIA-IR-mk-elim-proof IR-prf base-avars) step-avars)) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" LocalIndRuleCOUNTER "fin-prf BUILT"))) (fin-rv (DIA-extr-vatmpair-aux KIND fin-prf avar-to-Data)) (LEGDUM (if (or DEBUG EXTRACT-VERBOSE) (nldisplay "PROOF-IN-IndRule-FORM" LocalIndRuleCOUNTER "fin-prf MINED"))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) fin-rv "Ind-Rule IR")) (LEGDUM (set! DIA-UGA-ALIST (cdr DIA-UGA-ALIST)))) (nbe-normalize-vatmpair-of IND-RL-NORMALIZE fin-rv)))) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-IMP_ELIM-FORM"))) (op (proof-in-imp-elim-form-to-op prf)) (op-vatmpr (DIA-extr-vatmpair-aux KIND op avar-to-Data)) (AimpB (proof-to-formula op)) (LEGDUM (if PARANOIA (if (not (imp-form? 
AimpB)) (myerror "PROOF-IN-imp-elim-FORM: " "formula in imp form expected" (formula-to-string AimpB))))) (LEGDUM (if DEBUG (begin (nldisplay "PROOF-IN-imp-elim-FORM:" "A -> B was processed OK") (DIA-comment "Formula was" 'CNL (formula-to-string AimpB))))) (A (imp-form-to-premise AimpB)) (B (imp-form-to-conclusion AimpB)) (extra-free-vars-ini (set-minus (formula-to-free A) (formula-to-free B))) (op-vapr (vatmpair-to-vapair op-vatmpr)) (op-tmpair (vatmpair-to-tmpair op-vatmpr)) (op-tmtupalst (tmpair-to-alist op-tmpair)) (extra-free-vars-bis (DIA-set-minus extra-free-vars-ini op-tmtupalst)) (arg (proof-in-imp-elim-form-to-arg prf)) (old-arg-vatmpr (DIA-extr-vatmpair-aux KIND arg avar-to-Data)) (arg-vatmpr (nbe-normalize-vatmpair-of IMP-ARG-NORMALIZE old-arg-vatmpr)) (prem (imp-form-to-premise AimpB)) (LEGDUM (if DEBUG (begin (nldisplay "PROOF-IN-imp-elim-FORM:" "Premise was processed OK") (DIA-comment "Formula was" 'CNL (formula-to-string prem))))) (arg-vapr (vatmpair-to-vapair arg-vatmpr)) (arg-tmpair (vatmpair-to-tmpair arg-vatmpr)) (arg-tmtupalst (tmpair-to-alist arg-tmpair)) (extra-free-vars (DIA-set-minus extra-free-vars-bis arg-tmtupalst)) (xp (vapair-right arg-vapr)) (xs (vapair-right op-vapr)) (x (vatuple-append xp xs "proof-in-imp-elim-form 1")) (ysy (vapair-left op-vapr)) (yp (vapair-left arg-vapr)) (conc-free (formula-to-free (proof-to-formula prf))) (subst (DIA-make-ZERO-alist extra-free-vars)) (Tp (DIA-tmtuple-non-simult-subst (tmpair-to-tuple arg-tmpair) subst)) (TsT (DIA-tmtuple-non-simult-subst (tmpair-to-tuple op-tmpair) subst)) (xsTpxp (tmtuple-append (vatuple-to-tmtuple xs) (make-tmtuple-in-app-form Tp (vatuple-to-tmtuple xp)) "proof-in-imp-elim-form 1")) (ys (vatuple-left ysy)) (y (vatuple-right ysy)) (xy (vatuple-append x y "proof-in-imp-elim-form 2")) (Ts (tmtuple-left TsT)) (T (tmtuple-right TsT)) (xsTpxpy (tmtuple-append xsTpxp (vatuple-to-tmtuple y) "proof-in-imp-elim-form 2")) (xpTsxsTpxpy (tmtuple-append (vatuple-to-tmtuple xp) (make-tmtuple-in-app-form Ts xsTpxpy) "proof-in-imp-elim-form 3")) (S (make-tmtuple-in-abst-form x (make-tmtuple-in-app-form T xsTpxp))) (uSarg (make-tmtuplealist-in-abst-form xy (make-tmtuplealist-in-app-form (tmtuplealist-non-simult-subst arg-tmtupalst subst) xpTsxsTpxpy))) (uSop (make-tmtuplealist-in-abst-form x (make-tmtuplealist-in-app-form (tmtuplealist-non-simult-subst op-tmtupalst subst) xsTpxp))) (uS (append uSarg uSop)) (rv (make-vatmpair (make-vapair y x) (make-tmpair S uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-imp-elim-form"))) (nbe-normalize-vatmpair-of IMP-ELIM-NORMALIZE rv))))) ((proof-in-and-intro-form) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-AND_INTRO-FORM"))) (left (proof-in-and-intro-form-to-left prf)) (right (proof-in-and-intro-form-to-right prf)) (left-vatmpr (DIA-extr-vatmpair-aux KIND left avar-to-Data)) (left-vapr (vatmpair-to-vapair left-vatmpr)) (yp (vapair-left left-vapr)) (xp (vapair-right left-vapr)) (left-tmpair (vatmpair-to-tmpair left-vatmpr)) (Tp (tmpair-to-tuple left-tmpair)) (uTp (tmpair-to-alist left-tmpair)) (right-vatmpr (DIA-extr-vatmpair-aux KIND right avar-to-Data)) (right-vapr (vatmpair-to-vapair right-vatmpr)) (ys (vapair-left right-vapr)) (xs (vapair-right right-vapr)) (right-tmpair (vatmpair-to-tmpair right-vatmpr)) (Ts (tmpair-to-tuple right-tmpair)) (uTs (tmpair-to-alist right-tmpair)) (x (vatuple-append xp xs "proof-in-and-intro-form 1")) (y (vatuple-append yp ys "proof-in-and-intro-form 2")) (xy (vatuple-append x y "proof-in-and-intro-form 3")) (xpyp 
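;; Hedged schematic for the imp-elim case above: writing Tp for the
;; realizer extracted from the premise A and (Ts,T) for the pair
;; extracted from A -> B, the new realizer is the usual Dialectica
;; composition
;;   S x = T (xs, Tp xp)        with x = xp appended to xs,
;; and the counterexample functions are composed via xsTpxp / xsTpxpy;
;; free variables of A not occurring in B are padded with ZERO terms
;; through DIA-make-ZERO-alist.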
(vatuple-append xp yp "proof-in-and-intro-form 4")) (xsys (vatuple-append xs ys "proof-in-and-intro-form 5")) (uSA (make-tmtuplealist-in-app-form uTp (vatuple-to-tmtuple xpyp))) (uSB (make-tmtuplealist-in-app-form uTs (vatuple-to-tmtuple xsys))) (uS (make-tmtuplealist-in-abst-form xy (append uSA uSB))) (SA (make-tmtuple-in-abst-form x (make-tmtuple-in-app-form Tp (vatuple-to-tmtuple xp)))) (SB (make-tmtuple-in-abst-form x (make-tmtuple-in-app-form Ts (vatuple-to-tmtuple xs)))) (S (tmtuple-append SA SB "proof-in-and-intro-form")) (rv (make-vatmpair (make-vapair y x) (make-tmpair S uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-and-intro-form"))) rv)) ((proof-in-and-elim-left-form) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-AND_ELIM_LEFT-FORM"))) (kernel (proof-in-and-elim-left-form-to-kernel prf)) (fmla (proof-to-formula kernel)) (A (and-form-to-left fmla)) (B (and-form-to-right fmla)) (extra-free-vars-ini (set-minus (formula-to-free B) (formula-to-free A))) (vatmpr (DIA-extr-vatmpair-aux KIND kernel avar-to-Data)) (tmpair (vatmpair-to-tmpair vatmpr)) (tmtupalst (tmpair-to-alist tmpair)) (extra-free-vars (DIA-set-minus extra-free-vars-ini tmtupalst)) (subst (DIA-make-ZERO-alist extra-free-vars)) (Tp (DIA-tmtuple-non-simult-subst (tmtuple-left (tmpair-to-tuple tmpair)) subst)) (new-tmtupalst (tmtuplealist-non-simult-subst tmtupalst subst)) (vapr (vatmpair-to-vapair vatmpr)) (x (vapair-right vapr)) (vatp (vapair-left vapr)) (yp (vatuple-left vatp)) (xyp (vatuple-append x yp "proof-in-and-elim-left-form")) (xypZERO (tmtuple-append (vatuple-to-tmtuple xyp) (vatuple-to-zero-tmtuple (vatuple-right vatp)) "proof-in-and-elim-left-form")) (uS (make-tmtuplealist-in-abst-form xyp (make-tmtuplealist-in-app-form new-tmtupalst xypZERO))) (rv (make-vatmpair (make-vapair yp x) (make-tmpair Tp uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-and-elim-left-form"))) (nbe-normalize-vatmpair-of AND-NORMALIZE rv))) ((proof-in-and-elim-right-form) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-AND_ELIM_RIGHT-FORM"))) (kernel (proof-in-and-elim-right-form-to-kernel prf)) (fmla (proof-to-formula kernel)) (A (and-form-to-left fmla)) (B (and-form-to-right fmla)) (extra-free-vars-ini (set-minus (formula-to-free A) (formula-to-free B))) (vatmpr (DIA-extr-vatmpair-aux KIND kernel avar-to-Data)) (tmpair (vatmpair-to-tmpair vatmpr)) (tmtupalst (tmpair-to-alist tmpair)) (extra-free-vars (DIA-set-minus extra-free-vars-ini tmtupalst)) (subst (DIA-make-ZERO-alist extra-free-vars)) (Ts (DIA-tmtuple-non-simult-subst (tmtuple-right (tmpair-to-tuple tmpair)) subst)) (new-tmtupalst (tmtuplealist-non-simult-subst tmtupalst subst)) (vapr (vatmpair-to-vapair vatmpr)) (x (vapair-right vapr)) (vatp (vapair-left vapr)) (ys (vatuple-right vatp)) (xys (vatuple-append x ys "proof-in-and-elim-right-form")) (xZEROys (tmtuple-append (vatuple-to-tmtuple x) (tmtuple-append (vatuple-to-zero-tmtuple (vatuple-left vatp)) (vatuple-to-tmtuple ys) "proof-in-and-elim-right-form 1") "proof-in-and-elim-right-form 2")) (uS (make-tmtuplealist-in-abst-form xys (make-tmtuplealist-in-app-form tmtupalst xZEROys))) (rv (make-vatmpair (make-vapair ys x) (make-tmpair Ts uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-and-elim-right-form"))) (nbe-normalize-vatmpair-of AND-NORMALIZE rv))) ((proof-in-all-intro-form) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-ALL_INTRO-FORM"))) (z (var-to-vatuple (proof-in-all-intro-form-to-var prf))) (kernel (proof-in-all-intro-form-to-kernel prf)) 
(vatmpr (DIA-extr-vatmpair-aux KIND kernel avar-to-Data)) (tmpair (vatmpair-to-tmpair vatmpr)) (vapr (vatmpair-to-vapair vatmpr)) (x (vapair-right vapr)) (T (tmpair-to-tuple tmpair)) (uT (tmpair-to-alist tmpair)) (xz (vatuple-append x z "proof-in-all-intro-form 1")) (zy (vatuple-append z (vapair-left vapr) "proof-in-all-intro-form 2")) (S (make-tmtuple-in-abst-form xz (make-tmtuple-in-app-form T (vatuple-to-tmtuple x)))) (uS (make-tmtuplealist-in-abst-form xz (make-tmtuplealist-in-app-form uT (vatuple-to-tmtuple x)))) (rv (make-vatmpair (make-vapair zy x) (make-tmpair S uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-all-intro-form"))) rv)) ((proof-in-all-elim-form) (let*((LEGDUM (if DEBUG (nldisplay "PROOF-IN-ALL_ELIM-FORM"))) (op (proof-in-all-elim-form-to-op prf)) (all-fmla (proof-to-formula op)) (LEGDUM (if PARANOIA (if (not (all-form? all-fmla)) (myerror "PROOF-IN-all-elim-FORM: " "all-formula expected " (formula-to-string all-fmla))))) (all-var (all-form-to-var all-fmla)) (ker-fmla (all-form-to-kernel all-fmla)) (arg (proof-in-all-elim-form-to-arg prf)) (tmtup-arg ; (if (notelem? all-var ; (formula-to-free ker-fmla)) ; (begin ; (DIA-comment ; "PROOF-IN-all-elim-FORM:" ; "DUMMY substitution detected" ; "for formula" 'CNL ; (formula-to-string all-fmla) ; 'CNL "The variable " ; (var-to-string all-var) ; " does not occur free" ; "in the kernel" 'CNL ; (formula-to-string ker-fmla)) ; (term-to-zero-tmtuple arg)) (term-to-tmtuple arg)) (vatmpr (DIA-extr-vatmpair-aux KIND op avar-to-Data)) (tmpr (vatmpair-to-tmpair vatmpr)) (vapr (vatmpair-to-vapair vatmpr)) (zy (vapair-left vapr)) (x (vapair-right vapr)) (S (tmpair-to-tuple tmpr)) (uS (tmpair-to-alist tmpr)) (y (vatuple-right zy)) (xt (tmtuple-append (vatuple-to-tmtuple x) tmtup-arg "proof-in-all-elim-form")) (T (make-tmtuple-in-abst-form x (make-tmtuple-in-app-form S xt))) (uT (make-tmtuplealist-in-abst-form x (make-tmtuplealist-in-app-form uS xt))) (rv (make-vatmpair (make-vapair y x) (make-tmpair T uT))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-all-elim-form"))) (nbe-normalize-vatmpair-of ALL-NORMALIZE rv))) ((proof-in-allnc-intro-form) (case KIND ((light monot) (DIA-extr-vatmpair-aux KIND (proof-in-allnc-intro-form-to-kernel prf) avar-to-Data)) ((pure) (let*((LEGDUM (if DEBUG (nldisplay "PDI PROOF-IN-ALLNC_INTRO-FORM"))) (z (var-to-vatuple (proof-in-allnc-intro-form-to-var prf))) (kernel (proof-in-allnc-intro-form-to-kernel prf)) (vatmpr (DIA-extr-vatmpair-aux KIND kernel avar-to-Data)) (tmpair (vatmpair-to-tmpair vatmpr)) (vapr (vatmpair-to-vapair vatmpr)) (x (vapair-right vapr)) (T (tmpair-to-tuple tmpair)) (uT (tmpair-to-alist tmpair)) (xz (vatuple-append x z "proof-in-allnc-intro-form 1")) (zy (vatuple-append z (vapair-left vapr) "proof-in-allnc-intro-form 2")) (S (make-tmtuple-in-abst-form xz (make-tmtuple-in-app-form T (vatuple-to-tmtuple x)))) (uS (make-tmtuplealist-in-abst-form xz (make-tmtuplealist-in-app-form uT (vatuple-to-tmtuple x)))) (rv (make-vatmpair (make-vapair zy x) (make-tmpair S uS))) (LEGDUM (DIA-tyva-check KIND prf (proof-to-formula prf) rv "proof-in-allnc-intro-form"))) rv)) (else (myerror "DIA-extr-vatmpair-aux, proof-in-allnc-intro-form:" "unknown KIND")))) ((proof-in-allnc-elim-form) (let((LEGDUM (if DEBUG (nldisplay "PROOF-IN-allnc-elim-FORM"))) (final-op (DIA-allnc-form-to-final prf)) (rv #f)) (begin (if (proof-in-aconst-form? 
final-op) (let*((acnst (proof-in-aconst-form-to-aconst final-op)) (name (aconst-to-name acnst)) (cstknd (aconst-to-kind acnst)) (LEGDUM (if DEBUG (nldisplay "MULTIPLE allnc-elim" name " of " cstknd " kind " (formula-to-string (proof-to-formula prf)) SNL (formula-to-string (proof-to-formula final-op)))))) ; (if (string (string-length "Eq-Compat-Rev") (string-length name)) (not (string=? (substring name 0 (string-length "Eq-Compat-Rev")) "Eq-Compat-Rev"))) (or (> (string-length "Compat-Rev") (string-length name)) (not (string=? (substring name 0 (string-length "Compat-Rev")) "Compat-Rev"))))))) (define (not-user-global-assumption? x) (cond ((not (aconst? x)) #t) ((not (Eq? (aconst-to-kind x) 'global-assumption)) #t) (else (let((name (aconst-to-name x))) (cond ((string=? "Stab-Log" name) #t) ((string=? "Stab" name) #t) ((string=? "Efq-Log" name) #t) ((string=? "Efq" name) #t) ((and (<= (string-length "Eq-Compat-Rev") (string-length name)) (string=? "Eq-Compat-Rev" (substring name 0 (string-length "Eq-Compat-Rev")))) #t) ((and (<= (string-length "Compat-Rev") (string-length name)) (string=? "Compat-Rev" (substring name 0 (string-length "Compat-Rev")))) #t) (else (and (not (assoc name GLOBAL-ASSUMPTIONS)) (not (string=? "Ind-Rule" name))))))))) (define uga? user-global-assumption?) (define (not-uga? x) (begin (if DEBUG (DIA-comment "not-uga? INVOKED")) (not-user-global-assumption? x))) (define (uga=? acst1 acst2) (cond ((not-eq? 'global-assumption (aconst-to-kind acst1)) (myerror "uga=?: 1st argument not a" "global assumption" acst1)) ((not-eq? 'global-assumption (aconst-to-kind acst2)) (myerror "uga=?: 2nd argument not a" "global assumption" acst2)) ((string=? (aconst-to-name acst1) (aconst-to-name acst2)) #t) (else #f))) (define (DIA-UGA-ALIST-to-string) (string-append "{UGA-BEGIN:" (DIA-UGA-ALIST-to-string-aux DIA-UGA-ALIST))) (define (DIA-UGA-ALIST-to-string-aux alist) (if (null? alist) ":UGA-END}" (string-append ": " (aconst-to-string (caar alist)) " => " (formula-to-string (aconst-to-formula (caar alist))) " :" (DIA-UGA-ALIST-to-string-aux (cdr alist))))) (define (DIA-assoc-UGA acst) (begin (if PARANOIA (if (not-uga? acst) (myerror "DIA-assoc-UGA: user global" "assumption expected" acst))) (DIA-assoc-UGA-aux acst DIA-UGA-ALIST))) (define (DIA-assoc-UGA-aux acst galst) (if (null? galst) #f (if (uga=? acst (caar galst)) (car galst) (DIA-assoc-UGA-aux acst (cdr galst))))) (define DIA-UGA-ALIST '()) (define (DIA-uga-to-vatmpair KIND x) (begin (if CHECK (if (not-uga? x) (myerror "DIA-uga-to-vatmpair: User" "Global Assumption argument expected"))) (let*((name (aconst-to-name x)) (LEGDUM (if DEBUG (nldisplay "UserGlobalAssumption: " name))) (info (DIA-assoc-UGA x)) (LEGDUM (if DEBUG-UGA (nldisplay "DIA-uga-to-vatmpair : " "info determined"))) (rv (if info (cdr info) (let*((fmla (aconst-to-inst-formula x)) (new-typr (DIA-formula-to-typair KIND fmla)) (ex-tytup (typair-left new-typr)) (fa-tytup (typair-right new-typr)) (LEGDUM (if DEBUG-UGA (nldisplay "User Global Assumption detected: " name "of shape: \n " (formula-to-string fmla) "\nwhich produces universally" "quantified variables of types: \n" (tytuple-to-string fa-tytup) "\nand requires user-defined realizers" "for the existential part of types: \n" (tytuple-to-string ex-tytup)))) (tmtup (DIA-uga-user-defined-realizers ex-tytup name)) (new-rv (make-vatmpair (make-vapair (tytuple-to-vatuple fa-tytup) NULL_vatup) (make-tmpair tmtup NULL_tmtupalst)))) (begin (set! 
DIA-UGA-ALIST (cons (cons x new-rv) DIA-UGA-ALIST)) new-rv))))) rv))) (define (DIA-uga-user-defined-realizers tytup name) (if (NULL-tytup? tytup) (begin (if DEBUG-UGA (nldisplay "OK, existential part of UGA" name "is empty," "no user-defined realizer needed!")) (tytuple-to-tmtuple tytup)) (begin (myerror "Here the user should provide a realizing tmtuple." "NOT YET IMPLEMENTED")))) (define (DIA-IR-mk-imp-formula avars fmla) (if (null? avars) fmla (make-imp (avar-to-formula (car avars)) (DIA-IR-mk-imp-formula (cdr avars) fmla)))) (define (DIA-IR-mk-elim-proof prf avars) (if (null? avars) prf (DIA-IR-mk-elim-proof (make-proof-in-imp-elim-form prf (make-proof-in-avar-form (car avars))) (cdr avars)))) (define (DIA-IR-mk-intro-proof avars prf) (if (null? avars) prf (make-proof-in-imp-intro-form (car avars) (DIA-IR-mk-intro-proof (cdr avars) prf)))) (define (DIA-Ind-Rule? prf) (begin (if DEBUG-IND-RL (begin (nldisplay "DIA-Ind-Rule?: Begin") (cdp prf))) (let((op (proof-in-imp-elim-form-to-op prf))) (if (not (proof-in-imp-elim-form? op)) (begin (if DEBUG-IND-RL (nldisplay "DIA-Ind-Rule?: End" "op is not imp-elim")) #f) (let((arg-fmla (proof-to-formula (proof-in-imp-elim-form-to-arg prf)))) (if (not (all-form? arg-fmla)) (begin (if DEBUG-IND-RL (nldisplay "DIA-Ind-Rule?: End" "arg-fmla is not all-form")) #f) (let((fin-op (DIA-all-allnc-form-to-final (proof-in-imp-elim-form-to-op op)))) (if (not (proof-in-aconst-form? fin-op)) (begin (if DEBUG-IND-RL (nldisplay "DIA-Ind-Rule?: End" "fin-op is not aconst")) #f) (let((name (aconst-to-name (proof-in-aconst-form-to-aconst fin-op)))) (if (not (string=? "Ind" name)) (begin (if DEBUG-IND-RL (nldisplay "DIA-Ind-Rule?: End" "fin-op is not an Ind")) #f) (let*((op-op (proof-in-imp-elim-form-to-op op))) (if (not (proof-in-all-elim-form? op-op)) (begin (if DEBUG-IND-RL (nldisplay "DIA-Ind-Rule?: End" "op-op is not all-elim")) #f) (proof-in-all-elim-form-to-arg op-op))))))))))))) ;; Some auxiliary code for the "imp-intro" case of DIA-extr-vatmpair (define (Imp-Intro-split avar tmtupalst avar-to-Data) (begin (if PARANOIA (if (not-avar? avar) (myerror "Imp-Intro-split:" "1st argument must be avar") (if (ntmtuplealist? tmtupalst) (myerror "Imp-Intro-split: 2nd argument" "must be tmtuplealist")))) (Imp-Intro-split-aux avar tmtupalst avar-to-Data))) (define (Imp-Intro-split-aux avar tmtupalst avar-to-Data) (if (null? tmtupalst) (cons (cons NULL_vatup (list)) (list)) (let((recval (Imp-Intro-split-aux avar (cdr tmtupalst) avar-to-Data))) (if (avar=? avar (caar tmtupalst)) (cons (car recval) (cons (cdar tmtupalst) (cdr recval))) (cons (cons (vatuple-append (vapair-left (cadr (avar-to-Data (caar tmtupalst)))) (caar recval) "Imp-Intro-split") (cons (car tmtupalst) (cdar recval))) (cdr recval)))))) (define (Imp-Intro-CondN tmtuplst vatup tm) (begin (if PARANOIA (if (not-tmtuplist? tmtuplst) (myerror "Imp-Intro-CondN: 1st" "argument must be a tmtuplist") (if (null? tmtuplst) (myerror "Imp-Intro-CondN:" "Non-NULL tmtuplist expected") (if (null? (cdr tmtuplst)) (myerror "Imp-Intro-CondN:" "Non-SINGLETON tmtuplist" "expected"))))) (if PARANOIA (if (not-vatuple? vatup) (myerror "Imp-Intro-CondN: 2nd" "argument must be a vatuple" vatup) (if (not-DIA-term? tm) (myerror "Imp-Intro-CondN: 3rd" "argument must be a term" tm)))) (let((nrm-tm (if CONDN-NORMALIZE (DIA-time (nt tm)) tm))) (Imp-Intro-CondN-aux tmtuplst (DIA-mk-subst vatup nrm-tm tmtuplst))))) (define (DIA-mk-subst vatup tm tmtuplst) (if (null? 
(cdr tmtuplst)) (list) (cons (term-substitute tm (DIA-make-substitution vatup (car tmtuplst) "Imp-Intro-CondN")) (DIA-mk-subst vatup tm (cdr tmtuplst))))) (define (Imp-Intro-CondN-aux tmtuplst boolest) (if (null? (cdr tmtuplst)) (car tmtuplst) (make-tmtuple-in-if-form (car boolest) (Imp-Intro-CondN-aux (cdr tmtuplst) (cdr boolest)) (car tmtuplst)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; NEW Mon Begin ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (Imp-Intro-MonN tmtuplst) (begin (if PARANOIA (if (not-tmtuplist? tmtuplst) (myerror "Imp-Intro-MonN: 1st" "argument must be a tmtuplist") (if (null? tmtuplst) (myerror "Imp-Intro-MonN:" "Non-NULL tmtuplist expected") (if (null? (cdr tmtuplst)) (myerror "Imp-Intro-MonN:" "Non-SINGLETON tmtuplist" "expected"))))) (let*((tmtup (car tmtuplst)) (tytup (tmtuple-to-tytuple tmtup)) (maxtup (tytuple-to-max tytup))) (begin (Imp-Intro-MonN-aux tmtuplst maxtup))))) (define (Imp-Intro-MonN-aux tmtuplst maxtup) (begin (if (null? (cdr tmtuplst)) (car tmtuplst) (make-tmtuple-in-paral-app-form (make-tmtuple-in-paral-app-form maxtup (car tmtuplst)) (Imp-Intro-MonN-aux (cdr tmtuplst) maxtup))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; NEW Mon End ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; A simple and NOT complete check that a "vatmpair" realizes ;; a given proof. Used to check the extracted vatmpair in ;; dia-extr-vatmpair (define (va-check fmla vatmpr) (let*((tmpair (vatmpair-to-tmpair vatmpr)) (tmtpl (tmpair-to-tuple tmpair)) (alst (tmpair-to-alist tmpair)) (tmtpl-free (union (tmtuple-to-free tmtpl) (alist-to-tmtuple-free alst))) (fmla-free (union (formula-to-free fmla) (alist-to-formula-free alst)))) (if (notsubset? tmtpl-free fmla-free) (set-minus tmtpl-free fmla-free) #f))) (define (notelem? var lst) (if (null? lst) #t (if (equal? var (car lst)) #f (notelem? var (cdr lst))))) (define (elem-alist? var alst) (if (null? alst) #f (if (equal? var (caar alst)) #t (elem-alist? var (cdr alst))))) (define (notsubset? lstA lstB) (if (null? lstA) #f (if (notelem? (car lstA) lstB) #t (notsubset? (cdr lstA) lstB)))) (define (nDIA-tyva-check KIND prf fmla vatmpr err) (if (DIA-tyva-check KIND prf fmla vatmpr err) #f #t)) (define (DIA-tyva-check KIND prf fmla vatmpr err) (if (or CHECK PARANOIA DEBUG) (begin (if DEBUG (nldisplay "DIA-TYVA-CHECK: " err ;;; (formula-to-string (nf fmla)) )) (if (not (DIA-ty-check KIND fmla vatmpr)) (begin (nldisplay "DIA-TYVA-CHECK: " err "ERROR at type checking") (cdp prf) (DIA-comment (normalize-vatmpair-to-string vatmpr)) (myerror "SERIOUS DIA-extraction ERROR"))) (let((valst (va-check fmla vatmpr))) (if valst (begin (nldisplay "DIA-TYVA-CHECK: " err "ERROR at free-var checking." "Variables " (valist-to-string valst) "are free in the realizing terms" "and not among the free vars of " (formula-to-string (nf fmla))) (cdp prf) (DIA-comment (normalize-vatmpair-to-string vatmpr)) (myerror "SERIOUS DIA-extraction ERROR")))) #t) #t)) (define (DIA-ty-check KIND fmla vatmpr) (let*((vapr (vatmpair-to-vapair vatmpr)) (vatup-y (vapair-left vapr)) (tytup-y (vatuple-to-tytuple vatup-y)) (vatup-x (vapair-right vapr)) (tytup-x (vatuple-to-tytuple vatup-x)) (tmpr (vatmpair-to-tmpair vatmpr)) (tmtup-T (tmpair-to-tuple tmpr)) (tmtup-Tx (make-tmtuple-in-app-form tmtup-T (vatuple-to-tmtuple vatup-x))) (tytup-Tx (tmtuple-to-tytuple tmtup-Tx)) (typr (DIA-formula-to-typair KIND fmla)) (concl-univ (typair-right typr)) (concl-exis (typair-left typr)) (universal (if (tytuple_Eq? 
tytup-y concl-univ) #t (begin (nldisplay "DIA-ty-check:" "unequal universal tytuples" SNL (tytuple-to-string tytup-y) SNL "while the following was expected" SNL (tytuple-to-string concl-univ)) #f))) (existential (if (tytuple_Eq? tytup-Tx concl-exis) #t (begin (nldisplay "DIA-ty-check:" "unequal existential tytuples" SNL (tytuple-to-string tytup-Tx) SNL "while the following was expected" SNL (tytuple-to-string concl-exis)) #f))) (test-one (and universal existential))) test-one)) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; NEW Mon Begin ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define load-nat-if-unloaded (if (not (assoc "NatPlus" PROGRAM-CONSTANTS)) (libload "nat.scm"))) (define (make-Max) (begin (add-program-constant "Max" (py "nat=>nat=>nat") 1) (add-computation-rule (pt "Max (Succ m) (Succ n)") (pt "Succ (Max m n)")) (add-rewrite-rule (pt "Max nat 0") (pt "nat")) (add-rewrite-rule (pt "Max 0 nat") (pt "nat")) (add-rewrite-rule (pt "Max nat (Max nat n)") (pt "Max nat n")) (add-rewrite-rule (pt "Max nat (Max n nat)") (pt "Max nat n")) (add-rewrite-rule (pt "Max (Max nat n) nat") (pt "Max nat n")) (add-rewrite-rule (pt "Max (Max n nat) nat") (pt "Max nat n")) )) (define make-Max-once (if (assoc "Max" PROGRAM-CONSTANTS) (comment "WARNING: Max is already a program constant. Default to existent, no new addition !!!") (make-max))) (define (type-to-max typ) (cond ((star-form? typ) (let*((var_x (type-to-new-var typ)) (tm_x (make-term-in-var-form var_x)) (var_y (type-to-new-var typ)) (tm_y (make-term-in-var-form var_y)) (max_l (type-to-max (star-form-to-left-type typ))) (max_r (type-to-max (star-form-to-right-type typ)))) (make-term-in-abst-form var_x (make-term-in-abst-form var_y (make-term-in-pair-form (make-term-in-app-form (make-term-in-app-form max_l (make-term-in-lcomp-form tm_x)) (make-term-in-lcomp-form tm_y)) (make-term-in-app-form (make-term-in-app-form max_r (make-term-in-rcomp-form tm_x)) (make-term-in-rcomp-form tm_y))))))) ((arrow-form? typ) (let*((var_x (type-to-new-var typ)) (tm_x (make-term-in-var-form var_x)) (var_y (type-to-new-var typ)) (tm_y (make-term-in-var-form var_y)) (var_z (type-to-new-var (arrow-form-to-arg-type typ))) (tm_z (make-term-in-var-form var_z)) (max_val (type-to-max (arrow-form-to-val-type typ)))) (make-term-in-abst-form var_x (make-term-in-abst-form var_y (make-term-in-abst-form var_z (make-term-in-app-form (make-term-in-app-form max_val (make-term-in-app-form tm_x tm_z)) (make-term-in-app-form tm_y tm_z))))))) ((alg-form? typ) (if (string=? "nat" (alg-form-to-name typ)) (pt "Max") (myerror "type-to-max: only nat algebra allowed as ground type"))) (else (myerror "type-to-max: type argument expected" typ)))) (define (tytuple-to-max tytup) (if (not-tytuple? tytup) (myerror "tytuple-to-max:" "tytuple argument expected" tytup) (tytuple-to-max-aux tytup))) (define (tytuple-to-max-aux tytup) (if (null? (cdr tytup)) NULL_tmtup (if (null? (cddr tytup)) (term-to-tmtuple (type-to-max (cadr tytup))) (tmtuple-append (tytuple-to-max-aux (cadr tytup)) (tytuple-to-max-aux (cddr tytup)) "tytuple-to-max" )))) (define (mk-term-in-max-form tmlst) (if (null? tmlst) (begin (nldisplay "WARNING: mk-term-in-max-form:" "unexpected NULL argument") tmlst) (mk-term-in-max-form-aux (car tmlst) (cdr tmlst)))) (define (mk-term-in-max-form-aux tm tmlst) (if (null? 
tmlst) tm (let((Max (type-to-max (term-to-type tm))) (max_tmlst (mk-term-in-max-form-aux tmlst))) (mk-term-in-app-form Max tm max_tmlst)))) (define (type-to-mon-rec-tm typ_z star_typ) (begin (let((rv (let*((Rec (DIA-type-to-rec-term typ_z star_typ)) (var_n (type-to-new-var typ_z)) (tm_n (make-term-in-var-form var_n)) (var_y (type-to-new-var star_typ)) (tm_y (make-term-in-var-form var_y)) (var_z (type-to-new-var (make-arrow typ_z (make-arrow star_typ star_typ)))) (tm_z (make-term-in-var-form var_z)) (Max (type-to-max star_typ)) (mon_tm_z (mk-term-in-abst-form var_n var_y (mk-term-in-app-form Max tm_y (mk-term-in-app-form tm_z tm_n tm_y))))) (mk-term-in-abst-form var_y var_z var_n (mk-term-in-app-form Rec tm_y mon_tm_z tm_n))))) (begin rv)))) (define (nat-DIA-type-to-Rec-MonTm Rec_typ) (let*((nat_typ (py "nat")) (rv (DIA-type-to-Rec-MonTm nat_typ Rec_typ))) rv)) (define DIA-type-to-Rec-MonTm DIA-type-to-Rec-Term) (define (type-to-XMaj typ) (let*((Rec_tm (nat-DIA-type-to-Rec-MonTm typ)) (arr_typ (make-arrow (py "nat") typ)) (va_x (type-to-new-var arr_typ)) (tm_x (make-term-in-var-form va_x)) (Base_Tm (pt "0")) (Max_tm (type-to-max typ)) (va_z (type-to-new-var typ)) (tm_z (make-term-in-var-form va_z)) (va_k (type-to-new-var (py "nat"))) (tm_k (make-term-in-var-form va_k)) (tm_xk (make-term-in-app-form tm_x tm_k)) (Step_Tm (mk-term-in-abst-form va_k va_z (mk-term-in-app-form Max_tm tm_z tm_xk))) (XMaj_tm (mk-term-in-app-form Rec_tm Base_Tm Step_Tm)) (Succ_Tm (pt "Succ")) (rv (mk-term-in-abst-form va_x va_k (make-term-in-app-form XMaj_tm (make-term-in-app-form Succ_Tm tm_k))))) (begin (pp rv) rv))) (define (make-nat-XMaj) (let*((nat_typ (py "nat"))) (type-to-XMaj nat_typ))) (define nat-XMaj (make-nat-XMaj)) (define (term-to-maj tm) (begin (let((rv (case (tag tm) ((term-in-const-form) (if (string=? "Rec" (const-to-name (term-in-const-form-to-const tm))) (let((typ (arrow-form-to-arg-type (term-to-type tm)))) (type-to-mon-rec-tm (py "nat") typ)) tm)) ((term-in-var-form) (let((typ (term-to-type tm))) (cond ((nat-type? typ) tm) ((nat-nat-type? typ) (make-term-in-app-form nat-XMaj tm)) ((nat-rho-type? typ) (let*((rho-typ (give-rho-type typ)) (rho-XMaj (type-to-XMaj rho-typ))) (make-term-in-app-form rho-XMaj tm))) (else (myerror "term-to-maj: " "term variable is " (term-to-string tm) "which has type " (type-to-string typ) " but variables must " "have type of form _nat_ OR _nat=>rho_ in input term"))))) ((term-in-abst-form) (make-term-in-abst-form (term-in-abst-form-to-var tm) (term-to-maj (term-in-abst-form-to-kernel tm)))) ((term-in-app-form) (make-term-in-app-form (term-to-maj (term-in-app-form-to-op tm)) (term-to-maj (term-in-app-form-to-arg tm)))) ((term-in-pair-form) (make-term-in-pair-form (term-to-maj (term-in-pair-form-to-left tm)) (term-to-maj (term-in-pair-form-to-right tm)))) ((term-in-lcomp-form) (make-term-in-lcomp-form (term-to-maj (term-in-lcomp-form-to-kernel tm)))) ((term-in-rcomp-form) (make-term-in-rcomp-form (term-to-maj (term-in-rcomp-form-to-kernel tm)))) ((term-in-if-form) (myerror "term-to-maj: not wanted for If-terms ... see the code ... ")) ; ; MDH ==> discarded 19 Mai 2006 ; ; (mk-term-in-max-form (map term-to-maj (term-in-if-form-to-alts tm)))) (else (myerror "term-to-maj: term expected" tm))))) (begin rv)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; Further bureaucratic procedures for the MON case ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (nat-type? typ) (string=? 
"nat" (type-to-string typ))) (define (nat-nat-type? typ) (string=? "nat=>nat" (type-to-string typ))) (define (nat-rho-type? typ) (if (arrow-form? typ) (let*((arg_typ (arrow-form-to-arg-type typ)) (val_typ (arrow-form-to-val-type typ))) (nat-type? arg_typ)) #f)) (define (give-rho-type typ) (if (arrow-form? typ) (let*((arg_typ (arrow-form-to-arg-type typ)) (val_typ (arrow-form-to-val-type typ))) val_typ) (myerror "give-rho-type:" "Arrow type argument expected ... " typ))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;; The procedures below create the formula of inequality between ;;;;; two given terms/variables of the same type -- MDH 071117 ;;;;; Kept here since they might get useful some day ... not now ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; ; (define (make-leq-va typ var_x var_y) ; ; ; (if (not (equal? typ (var-to-type var_x))) ; ; ; (myerror "make-leq: 1st var must have type typ") ; ; ; (if (not (equal? typ (var-to-type var_y))) ; ; ; (myerror "make-leq: 2nd var must have type typ") ; ; ; (let*((tm_x (make-term-in-var-form var_x)) ; ; ; (tm_y (make-term-in-var-form var_y))) ; ; ; (make-leq-aux typ tm_x tm_y))))) ; ; ; (define (make-leq typ tm_x tm_y) ; ; ; (if (not (equal? typ (term-to-type tm_x))) ; ; ; (myerror "make-leq: 1st var must have type typ") ; ; ; (if (not (equal? typ (term-to-type tm_y))) ; ; ; (myerror "make-leq: 2nd var must have type typ") ; ; ; (make-leq-aux typ tm_x tm_y)))) ; ; ; (define (make-leq-aux typ tm_x tm_y) ; ; ; (cond ; ; ; ((star-form? typ) ; ; ; (let*((leq_l (make-leq ; ; ; (star-form-to-left-type typ) ; ; ; (make-term-in-lcomp-form tm_x) ; ; ; (make-term-in-lcomp-form tm_y))) ; ; ; (leq_r (make-leq ; ; ; (star-form-to-right-type typ) ; ; ; (make-term-in-rcomp-form tm_x) ; ; ; (make-term-in-rcomp-form tm_y)))) ; ; ; (make-and leq_l leq_r))) ; ; ; ((arrow-form? typ) ; ; ; (let*((va_z (type-to-new-var (arrow-form-to-arg-type typ))) ; ; ; (tm_z (make-term-in-var-form va_z))) ; ; ; (make-allnc va_z ; ; ; (make-leq (arrow-form-to-val-type typ) ; ; ; (make-term-in-app-form tm_x tm_z) ; ; ; (make-term-in-app-form tm_y tm_z))))) ; ; ; ((alg-form? typ) ; ; ; (if (string=? 
"nat" (alg-form-to-name typ)) ; ; ; (make-atomic-formula ; ; ; (mk-term-in-app-form (pt "NatLeq") tm_x tm_y)) ; ; ; (myerror "make-leq: only nat algebra allowed as ground type"))) ; ; ; (else ; ; ; (myerror "make-leq: type argument expected" typ)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;; NEW Mon End ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; These are a few useful depth/size measuring functions for proofs/terms ;; SHOULD BE included in the standard libraries --- there were some procedures ;; in the standard libraries already, but I was not happy with them --- here ;; I propose just an alternative --- MDH 25 October 2007 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define (proof-to-depth prf) (case (tag prf) ((proof-in-avar-form proof-in-aconst-form) 0) ((proof-in-imp-intro-form) (let ((kernel (proof-in-imp-intro-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-imp-elim-form) (let ((op (proof-in-imp-elim-form-to-op prf)) (arg (proof-in-imp-elim-form-to-arg prf))) (+ 1 (max (proof-to-depth op) (proof-to-depth arg))))) ((proof-in-and-intro-form) (let ((left (proof-in-and-intro-form-to-left prf)) (right (proof-in-and-intro-form-to-right prf))) (+ 1 (max (proof-to-depth left) (proof-to-depth right))))) ((proof-in-and-elim-left-form) (let ((kernel (proof-in-and-elim-left-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-and-elim-right-form) (let ((kernel (proof-in-and-elim-right-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-tensor-intro-form) (let ((left (proof-in-tensor-intro-form-to-left prf)) (right (proof-in-tensor-intro-form-to-right prf))) (+ 1 (max (proof-to-depth left) (proof-to-depth right))))) ((proof-in-tensor-elim-left-form) (let ((kernel (proof-in-tensor-elim-left-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-tensor-elim-right-form) (let ((kernel (proof-in-tensor-elim-right-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-all-intro-form) (let ((kernel (proof-in-all-intro-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-all-elim-form) (let ((op (proof-in-all-elim-form-to-op prf))) (+ 1 (proof-to-depth op)))) ((proof-in-allnc-intro-form) (let ((kernel (proof-in-allnc-intro-form-to-kernel prf))) (+ 1 (proof-to-depth kernel)))) ((proof-in-allnc-elim-form) (let ((op (proof-in-allnc-elim-form-to-op prf))) (+ 1 (proof-to-depth op)))) (else (myerror "proof-to-depth: proof tag expected" (tag prf))))) (define (proof-to-size prf) (case (tag prf) ((proof-in-avar-form proof-in-aconst-form) 1) ((proof-in-imp-intro-form) (let ((kernel (proof-in-imp-intro-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-imp-elim-form) (let ((op (proof-in-imp-elim-form-to-op prf)) (arg (proof-in-imp-elim-form-to-arg prf))) (+ 1 (+ (proof-to-size op) (proof-to-size arg))))) ((proof-in-and-intro-form) (let ((left (proof-in-and-intro-form-to-left prf)) (right (proof-in-and-intro-form-to-right prf))) (+ 1 (+ (proof-to-size left) (proof-to-size right))))) ((proof-in-and-elim-left-form) (let ((kernel (proof-in-and-elim-left-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-and-elim-right-form) (let ((kernel (proof-in-and-elim-right-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-tensor-intro-form) (let ((left (proof-in-tensor-intro-form-to-left prf)) (right (proof-in-tensor-intro-form-to-right prf))) (+ 1 (+ 
(proof-to-size left) (proof-to-size right))))) ((proof-in-tensor-elim-left-form) (let ((kernel (proof-in-tensor-elim-left-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-tensor-elim-right-form) (let ((kernel (proof-in-tensor-elim-right-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-all-intro-form) (let ((kernel (proof-in-all-intro-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-all-elim-form) (let ((op (proof-in-all-elim-form-to-op prf))) (+ 1 (proof-to-size op)))) ((proof-in-allnc-intro-form) (let ((kernel (proof-in-allnc-intro-form-to-kernel prf))) (+ 1 (proof-to-size kernel)))) ((proof-in-allnc-elim-form) (let ((op (proof-in-allnc-elim-form-to-op prf))) (+ 1 (proof-to-size op)))) (else (myerror "proof-to-size: proof tag expected" (tag prf))))) (define (size-and-depth tm) (begin (newline) (display "***********************************************") (newline) (display "Depth of term is ") (display (term-to-depth tm)) (newline) (display "Size of term is ") (display (term-to-size tm)) (newline) (display "***********************************************") (newline) )) (define (term-to-depth tm) (case (tag tm) ((term-in-var-form term-in-const-form) 0) ((term-in-abst-form) (+ 1 (term-to-depth (term-in-abst-form-to-kernel tm)))) ((term-in-app-form) (+ 1 (max (term-to-depth (term-in-app-form-to-op tm)) (term-to-depth (term-in-app-form-to-arg tm))))) ((term-in-pair-form) (+ 1 (max (term-to-depth (term-in-pair-form-to-left tm)) (term-to-depth (term-in-pair-form-to-right tm))))) ((term-in-lcomp-form) (+ 1 (term-to-depth (term-in-lcomp-form-to-kernel tm)))) ((term-in-rcomp-form) (+ 1 (term-to-depth (term-in-rcomp-form-to-kernel tm)))) ((term-in-if-form) (begin ; (nldisplay "ATTENTION: term-to-depth: if-form detected") (+ 1 (max (term-to-depth (term-in-if-form-to-test tm)) (apply max (map term-to-depth (term-in-if-form-to-alts tm))))))) (else (myerror "default-term-to-depth: term expected" tm)))) (define (term-to-size tm) (case (tag tm) ((term-in-var-form term-in-const-form) 1) ((term-in-abst-form) (+ 1 (term-to-size (term-in-abst-form-to-kernel tm)))) ((term-in-app-form) (+ 1 (+ (term-to-size (term-in-app-form-to-op tm)) (term-to-size (term-in-app-form-to-arg tm))))) ((term-in-pair-form) (+ 1 (+ (term-to-size (term-in-pair-form-to-left tm)) (term-to-size (term-in-pair-form-to-right tm))))) ((term-in-lcomp-form) (+ 1 (term-to-size (term-in-lcomp-form-to-kernel tm)))) ((term-in-rcomp-form) (+ 1 (term-to-size (term-in-rcomp-form-to-kernel tm)))) ((term-in-if-form) (begin ; (nldisplay "ATTENTION: term-to-size: if-form detected") (+ 1 (+ (term-to-size (term-in-if-form-to-test tm)) (apply + (map term-to-size (term-in-if-form-to-alts tm))))))) (else (myerror "default-term-to-size: term expected" tm)))) (define (term-to-sad tm) (case (tag tm) ((term-in-var-form term-in-const-form) (cons 1 0)) ((term-in-abst-form) (let*((ker (term-in-abst-form-to-kernel tm)) (sad (term-to-sad ker)) (s (+ 1 (car sad))) (d (+ 1 (cdr sad)))) (cons s d))) ((term-in-app-form) (let*((op (term-in-app-form-to-op tm)) (sad_op (term-to-sad op)) (s_op (car sad_op)) (d_op (cdr sad_op)) (arg (term-in-app-form-to-arg tm)) (sad_arg (term-to-sad arg)) (s_arg (car sad_arg)) (d_arg (cdr sad_arg)) (s (+ 1 (+ s_op s_arg))) (d (+ 1 (max d_op d_arg)))) (cons s d))) ((term-in-pair-form) (let*((op (term-in-pair-form-to-right tm)) (sad_op (term-to-sad op)) (s_op (car sad_op)) (d_op (cdr sad_op)) (arg (term-in-pair-form-to-left tm)) (sad_arg (term-to-sad arg)) (s_arg (car sad_arg)) (d_arg (cdr sad_arg)) 
(s (+ 1 (+ s_op s_arg))) (d (+ 1 (max d_op d_arg)))) (cons s d))) ((term-in-lcomp-form) (let*((ker (term-in-lcomp-form-to-kernel tm)) (sad (term-to-sad ker)) (s (+ 1 (car sad))) (d (+ 1 (cdr sad)))) (cons s d))) ((term-in-rcomp-form) (let*((ker (term-in-rcomp-form-to-kernel tm)) (sad (term-to-sad ker)) (s (+ 1 (car sad))) (d (+ 1 (cdr sad)))) (cons s d))) ((term-in-if-form) (myerror "term-to-sad: if-form detected")) ; (begin ; (nldisplay "ATTENTION: term-to-sad: if-form detected") ; (+ 1 (+ (term-to-sad (term-in-if-form-to-test tm)) ; (apply + (map term-to-sad ; (term-in-if-form-to-alts tm))))))) (else (myerror "default-term-to-sad: term expected" tm)))) (newline) (display "******************************************************") (newline) (display "***** DIALECTICA Interpretations extraction module LOADED !!!") (newline) (display "******************************************************") (newline) minlog-4.0.99.20080304/doc/0000755000175000017500000000000011340243353014034 5ustar barralbarralminlog-4.0.99.20080304/doc/Makefile0000644000175000017500000000177310746604522015514 0ustar barralbarral## $Id: Makefile,v 1.32 2008/01/25 13:30:19 logik Exp $ ## Makefile for subdirectory doc all: .dep ls -sh .dep: mlcf.pdf mpcref.pdf ref.pdf reflection_manual.pdf tutor.pdf cleanup touch .dep mlcf.pdf: mlcf.tex bussproofs.sty minlog.bib pdflatex mlcf >> /dev/null bibtex -terse mlcf makeindex -q mlcf pdflatex mlcf >> /dev/null pdflatex mlcf >> /dev/null mpcref.pdf: mpcref.tex pdflatex mpcref.tex >> /dev/null pdflatex mpcref.tex >> /dev/null ref.pdf: ref.tex bussproofs.sty minlog.bib pdflatex ref >> /dev/null bibtex -terse ref makeindex -q ref pdflatex ref >> /dev/null pdflatex ref >> /dev/null reflection_manual.pdf: reflection_manual.tex pdflatex reflection_manual.tex >> /dev/null pdflatex reflection_manual.tex >> /dev/null tutor.pdf: tutor.tex pdflatex tutor >> /dev/null bibtex -terse tutor pdflatex tutor >> /dev/null clean: cleanup rm -rf .dep .dep.* rm -rf *.dvi *.pdf *.ps rm -rf *~ *% ls -sh cleanup: rm -rf *.aux *.log *.blg *.bbl *.idx *.toc *.ind *.ilg *.brf *.out minlog-4.0.99.20080304/doc/manual.txt0000644000175000017500000000664110131511433016053 0ustar barralbarralNeeds Update! Interaction with Minlog. ======================== This text describes how to start Minlog and how to interact with Minlog via Emacs. The very first time ------------------- Extend your .emacs file with the following definition: (defun run-minlog () "Start Minlog von Herrn Schwichtenberg" (interactive) (run-scheme "scheme -emacs -band /home/math/schwicht/minlog/src/minlog.com") Every new session ----------------- You can start a minlog session from Emacs, with the command: M-x run-minlog This results in a new buffer for Scheme interaction, called *scheme*. Here Scheme is active, with the definitions of minlog added to the current environment. In this buffer you can type any expression, using Emacs as editor. To evaluate an expression type C-x C-e immediately after an expression. The result will be computed by the Scheme-interpreter, and the result will be inserted in the buffer. In the bottom of the Scheme-window you see: Scheme: n [evaluator]. This n should always be 1. If an (Scheme-)error occurs, this number is raised to n+1. You are in a copy of Scheme, to be used for debugging. To proceed with Scheme, type C-c C-c to reenter level 1. If you forget this, new definitions may be get lost. To learn more about the Emacs scheme-mode, try C-h m, or describe-mode under the Help pull down menu. 
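As a small illustration of evaluating expressions (purely illustrative: it assumes the arithmetic library has been loaded first, and uses Minlog's term parser pt and pretty-printer pp):

  (libload "nat.scm")
  (pp (pt "n+m"))

Evaluating each of these with C-x C-e sends it to the *scheme* buffer; the second expression answers with the parsed term n+m. Any other Scheme expression can be evaluated in the same way.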
Using the Interactive Theorem Prover ------------------------------------ A minlog-command is just a Scheme function. You can call it by typing a Scheme-expression as explained before. The minlog functions involved with interactive theorem proving are special: they return no value, but work via side-effects. One of the side effects is the manipulation of the global variable pproof (the current partial proof). Another side-effect is that the current goal and context are printed on the screen. Keeping a separate vernacular file ---------------------------------- It is handy to keep a separate file for the commands you enter into Minlog. In this way you can reproduce a minlog-session. To do this, you split your window (with C-x 2) after starting Minlog. In the second window, you load a file with "C-x C-f file". This file can be a new file, or an existing one. If this file has the extension ".scm", Emacs will understand that this buffer will serve as an input buffer for Scheme. Otherwise, you have to type "M-x scheme-mode" first. In this input buffer you can edit Scheme-expressions. With "C-x C-e" these commands are evaluated by Scheme. The output appears in the *scheme* window. In this way you can separate input and output. After a session you can save the input (with C-x s) for later use. Loading a vernacular file. -------------------------- To load a file with Scheme expressions (for example the commands of a previous minlog-session), you evaluate the scheme-expression: (load "file"). This has the effect that all the commands in "file" are executed. Looking at the source file -------------------------- It is often useful to have a look at the minlog-source file. There is a quick way to find the definition of a minlog-function, using the tag-file. The command for looking for a tag is: M-. to get the source-code in this window. C-x 4 . to get the source-code in another window. C-x 5 . to get the source-code in another frame. After typing M-. Emacs asks what to look for. The default is the word under the cursor. Then Emacs asks where the tag-table is. This is in "~schwicht/minlog/src". minlog-4.0.99.20080304/doc/minlog.mac0000644000175000017500000004433010746604522016015 0ustar barralbarral% $Id: minlog.mac,v 1.3 2008/01/25 13:30:18 logik Exp $ % minlog.mac contains the macros for all tex-files of the minlog % documentation in order to avoid code duplication and different % notations.
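% Example usage (purely illustrative): a documentation file that does
% \input{minlog.mac} can then write \mi\ for the system name, \T\ for
% Goedel's T, and formulas such as $\allnc n^{\nat}\,\ex m\,\Even(m)$ or
% $A \imp \falsum$ uniformly with the macros defined below.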
\newcommand*{\Acc} {\mathsf{Acc}} \newcommand*{\Add} {\mathsf{Add}} \newcommand*{\App} {\mathsf{App}} \newcommand*{\BNFdef} {\mathtt{\; ::= \;}} \newcommand*{\BNFor} {\mid} \newcommand*{\Barid} {\mathsf{Bar}} \newcommand*{\CL} {\textbf{CL}} \newcommand*{\CPC} {\textbf{Cp}} % classical propositional logic \newcommand*{\CQC} {\textbf{C}}% classical logic \newcommand*{\CasesAxiom}{\mathsf{Cases}} \newcommand*{\Cases} {\C{C}} \newcommand*{\Comp} {\textsc{Comp}} \newcommand*{\C}[1] {\mathcal{#1}} % Euler Script - only caps, use as \C{A} \newcommand*{\Dup}{\mathsf{Dup}} \newcommand*{\D}[1] {\mathbb{#1}} % Doubles - only caps, use as \D{A} \newcommand*{\EfqLog} {\textsf{Efq-Log}} \newcommand*{\Efqdummy} {\mathsf{dummy}} \newcommand*{\Efq} {\mathsf{Efq}} \newcommand*{\Eig} {\mathsf{Eig}} \newcommand*{\Elim} {\mathsf{Elim}} \newcommand*{\EqCompat} {\mathsf{Eq}\hbox{-}\mathsf{Compat}} \newcommand*{\EqE} {\mathsf{Eq}^{-}} \newcommand*{\EqI} {\mathsf{Eq}^{+}} \newcommand*{\Eq} {\approx} \newcommand*{\Evenodd} {\mathsf{EvenOdd}} \newcommand*{\Even} {\mathsf{Even}} \newcommand*{\Ev} {\mathsf{Ev}} \newcommand*{\FA} {\mathsf{FA}} \newcommand*{\FV} {\mathrm{FV}} \newcommand*{\Fib} {G} \newcommand*{\Fun} {\mathsf{Fun}} \newcommand*{\F}[1] {\mathfrak{#1}} % Fraktur, use as \F{a} \newcommand*{\Good} {\mathsf{Good}} \newcommand*{\HAcontextA}{\Pi} \newcommand*{\HAcontextB}{\Gamma} \newcommand*{\HAitA}[1] {\left\{{#1}\right\}} \newcommand*{\HAtyped}[4]{{#1}\mid{#2} \vdash {#3}\colon {#4}} \newcommand*{\HA} {\textbf{HA}}% Heyting arithmetic \newcommand*{\IPC} {\textbf{Ip}} % intuitionistic propositional logic \newcommand*{\IQCE} {\textbf{Ie}}% int.logic with E-predicate \newcommand*{\IQC} {\textbf{I}}% intuitionistic logic \newcommand*{\If} {\mathsf{If}} \newcommand*{\Ins} {\mathsf{Ins}} \newcommand*{\Intro} {\mathsf{Intro}} \newcommand*{\LHA} {\textbf{LHA}}% Linear Heyting arithmetic \newcommand*{\LT} {\textbf{LT}}% Linear Goedel T \newcommand*{\MPC} {\textbf{Mp}}% minimal propositional logic \newcommand*{\MQC} {\textbf{M}}% minimal logic \newcommand*{\Neg} {\mathrm{Neg}} \newcommand*{\NumVars} {\mathsf{NumVars}} \newcommand*{\Od} {\mathsf{Od}} \newcommand*{\PV} {\mathsf{PV}} \newcommand*{\Prog} {\mathsf{Prog}} \newcommand*{\Rel} {\mathsf{Rel}} \newcommand*{\Rf}[1] {\mathsf{Rf}(#1)} \newcommand*{\SC} {\mathsf{SC}} \newcommand*{\SN} {\mathsf{SN}} \newcommand*{\ST} {\delta} \newcommand*{\Set} {\mathbb{S}} \newcommand*{\Sort} {\mathsf{Sort}} \newcommand*{\Stab} {\mathsf{Stab}} \newcommand*{\TDegs} {\mathsf{TDegs}} \newcommand*{\TERM} {textsc{Term}} \newcommand*{\Term} {\mathsf{Term}} \newcommand*{\Ter} {\mathsf{Ter}} \newcommand*{\Tikk} {\Diamond} \newcommand*{\Tlist} {\mathsf{Tlist}} \newcommand*{\Total} {\mathsf{Total}} \newcommand*{\TrCl} {\mathsf{TrCl}} \newcommand*{\Tree} {\mathsf{Tree}} \newcommand*{\Truthax} {\ax_{\mathsf{true}}} \newcommand*{\Types} {\mathsf{Types}} \newcommand*{\T} {\textbf{T}}% Goedel T \newcommand*{\Uvar}[1] {x_{#1}} %object var assigned to assumption var \newcommand*{\Var} {\mathsf{Var}} \newcommand*{\Zth} {Z} \newcommand*{\abs} {\mathsf{abs}} \newcommand*{\acronym}[1]{textsc{#1}} \newcommand*{\activation}{\mathrm{Activation}} \newcommand*{\algAcc} {\mathsf{algAcc}} \newcommand*{\allE} {{\forall}^{-}} \newcommand*{\allI} {{\forall}^{+}} \newcommand*{\allncE} {{\allnc}^{-}} \newcommand*{\allncI} {{\allnc}^{+}} \newcommand*{\allnc} {\forall^{\mathsf{nc}}} %constructive, with no comp. 
content \newcommand*{\andEone}{{\land}^{-}_1} \newcommand*{\andEzero}{{\land}^{-}_0} \newcommand*{\andI} {{\land}^{+}} \newcommand*{\appmon} {\mathsf{appmon}} \newcommand*{\app} {\mathsf{app}} \newcommand*{\assumption}{\mathrm{Assumption}} \newcommand*{\atom} {\mathsf{atom}} \newcommand*{\axiom} {\mathrm{Axiom}} \newcommand*{\ax} {\mathsf{Ax}} \newcommand*{\bang} {\square} \newcommand*{\betabf} {\beta\hspace{-1.35ex}\beta\hspace{-1.35ex}\beta} \newcommand*{\betapar}{\beta\mathsf{par}} \newcommand*{\betatak}{\beta\mathsf{Tak}} \newcommand*{\bigland} {\mathop{\hbox{$\bigwedge$\kern-.6em$\bigwedge$}}} \newcommand*{\biglor} {\mathop{\hbox{$\bigvee$\kern-.6em$\bigvee$}}} \newcommand*{\bincaseconst}{\B{d}} \newcommand*{\booleneg}{\mathsf{Neg}} \newcommand*{\boole} {\mathsf{boole}} \newcommand*{\bottom} {\mathsf{b\kern-0.14em b}} \newcommand*{\branch} {\mathsf{Branch}} \newcommand*{\bs} {\mathit{bs}} \newcommand*{\btree} {\mathsf{btree}} \newcommand*{\bv} {\begin{verbatim}} \newcommand*{\card}[1] {\#(#1)} \newcommand*{\caseBooleConst}{\mathsf{if}} \newcommand*{\caseListConst}{\B{c}} \newcommand*{\caseconst}{\mathsf{case}} \newcommand*{\clauses} {\mathsf{KF}} \newcommand*{\clean}[1] {{#1}^{\mathit{c}}} \newcommand*{\cnvw} {\mapsto_{\mathsf{w}}} \newcommand*{\cnv} {\mapsto} \newcommand*{\collapse}[1]{\mathrm{c}_{#1}} \newcommand*{\comUvar}[1]{\com{x}_{#1}} %object var assigned to assumption var \newcommand*{\completion}{\mathrm{Completion}} \newcommand*{\comp} {\mathsf{comp}} \newcommand*{\com}[1] {\bar{#1}} \newcommand*{\conItA}[1]{\left\{{#1}\right\}} \newcommand*{\concat}[2]{#1 \mid #2} \newcommand*{\consnil}[1]{#1{:}} \newcommand*{\constant} {\mathrm{Constant}} \newcommand*{\constrtypes}{\mathsf{KT}} \newcommand*{\constr} {\mathsf{constr}} \newcommand*{\cons} {\mathsf{cons}} \newcommand*{\contraction}{\mathrm{Contraction}} \newcommand*{\con}[2] {#1 :: #2} \newcommand*{\cvind} {\mathrm{cvind}} \newcommand*{\defiff}{\;\;:\Longleftrightarrow\;\;} \newcommand*{\dep}[1]{\mathsf{dp}(#1)} % depth of a formula or prooftree \newcommand*{\derint}{\vdash_{\mathsf{int}}} \newcommand*{\domain}[1]{\mathsf{D_{#1}}} \newcommand*{\domind} {\mathsf{Dom-Ind}} \newcommand*{\dom}{\mathsf{dom}} \newcommand*{\dotminus} {\mathbin{{{-\mkern -9.5mu \mathchoice{\raise 2pt\hbox{$\cdot$}% \mkern6mu}{\raise 2pt\hbox{$\cdot$}\mkern6mu}{\raise 1.5pt% \hbox{$\scriptstyle\cdot$}\mkern4mu}{\raise 1pt% \hbox{$\scriptscriptstyle\cdot$}\mkern4mu}}}}} \newcommand*{\elem}[2]{#1 \in #2} \newcommand*{\elim}[3]{(#1, #2.#3)} \newcommand*{\emptycontext}{\cdot} \newcommand*{\emptyls}{\epsilon} \newcommand*{\empt} {\mathsf{Empty}} \newcommand*{\emp}{\mathsf{Empty}} \newcommand*{\eps}{\varepsilon} \newcommand*{\eqrel}[2]{{=}(#1,#2)} \newcommand*{\es}{\varnothing}% the empty set \newcommand*{\etabf}{\eta\hspace{-1.28ex}\eta\hspace{-1.28ex}\eta} \newcommand*{\etapar}{\eta\mathsf{par}} \newcommand*{\et}[1] {\mathsf{et}(#1)} \newcommand*{\excE}{{\ex}^{-}} \newcommand*{\excI}{{\ex}^{+}} \newcommand*{\exca} {\exists^{\mathrm{ca}}} %classical ex with arith falsum \newcommand*{\exclE} {{\excl}^{-}} \newcommand*{\exclI} {{\excl}^{+}} \newcommand*{\excl} {\exists^{\mathsf{cl}}} %classical, with logical falsum \newcommand*{\exncE} {({\exnc})^{-}} \newcommand*{\exncI} {({\exnc})^{+}} \newcommand*{\exnc} {\exists^{\mathsf{nc}}} %constructive, with no comp. 
content \newcommand*{\extend}[1]{\mathrm{e}_{#1}} \newcommand*{\extrTer}[1]{\lbrack\!\lbrack#1\rbrack\!\rbrack} \newcommand*{\extrTy}[1]{\tau(#1)} \newcommand*{\ex} {\ensuremath{\exists}} %constructive existence \newcommand*{\false} {\mathsf{f\kern-0.14em f}} \newcommand*{\falsumE}{\falsum^{-}} \newcommand*{\falsum} {\bot} \newcommand*{\fini} {{\vbox{\hrule\hbox{\vrule height1.3ex\hskip0.8ex\vrule}\hrule}}} \newcommand*{\fix} {\mathsf{fix}} \newcommand*{\formulas}{\mathsf{F}} \newcommand*{\hfilll}{\hspace*{0cm plus 1filll}} \newcommand*{\ic}{\mathrm{[}\textbf{ic}\mathrm{]}} \newcommand*{\id}{\mathsf{id}} \newcommand*{\ifthenelse}[3] {[\textbf{if}\; #1\; \textbf{then}\; #2\; \textbf{else}\; #3]} \newcommand*{\impE}{{\to}^{-}} \newcommand*{\impI}{{\to}^{+}} \newcommand*{\imp}{\Rightarrow} \newcommand*{\indexentry}[1]{\emph{#1}\index{#1}} \newcommand*{\ind} {\mathsf{Ind}} \newcommand*{\inj} {\mathsf{in}} \newcommand*{\inquotes}[1]{``#1''} \newcommand*{\ins}{\mathsf{ins}} \newcommand*{\inviso}{\tilde{\varphi}} %and its inverse \newcommand*{\iso}{\varphi} %\iso : rho -> clean(rho) Isomorphism \newcommand*{\iterate}{\mathit{I}} \newcommand*{\lambdamon}{\lambda\mathsf{mon}} \newcommand*{\lambf} {\lambda\hspace{-1.28ex}\lambda\hspace{-1.28ex}\lambda} \newcommand*{\lametato}{\lambf\etabf_{\to}} \newcommand*{\lamnc}{\lambda^{\mathsf{nc}}} \newcommand*{\lamto}{\lambf_{\to}} \newcommand*{\leafsize}[1]{\mathsf{ls}(#1)} %leaf size of a formula or prooftree \newcommand*{\leaf} {\mathsf{Leaf}} \newcommand*{\len}[1]{\mathsf{len}(#1)} \newcommand*{\lev}[1]{\mathsf{lev}(#1)} \newcommand*{\linto}{\multimap} \newcommand*{\listappend}{\mathbin{{:}{+}{:}}} \newcommand*{\listrev}{\mathsf{Rev}} \newcommand*{\lorcl}{\lor^{\mathsf{cl}}} \newcommand*{\lorc}{\lor} %constructive version, written without * \newcommand*{\lst} {\mathsf{list}} \newcommand*{\ls}[2]{[\,#1\mid #2\,]} \newcommand*{\ltensorE}{\ltensor^{-}} \newcommand*{\ltensorI}{\ltensor^{+}} \newcommand*{\ltensor}{\otimes} \newcommand*{\mdir} {\~{}/minlog} \newcommand*{\mic} {\mathrm{[}\textbf{mic}\mathrm{]}} \newcommand*{\mi} {\textsc{MINLOG}} \newcommand*{\mrAAA} {\;\mathbf{r}_{\C{A}}\;} \newcommand*{\mrind} {\mathbf{r}} \newcommand*{\mr} {\,\mathbf{r}\,} \newcommand*{\mylabel}[1]{\label{#1}} \newcommand*{\nat} {\mathsf{nat}} \newcommand*{\next} {\mathsf{nxt}} \newcommand*{\nf}[1] {\mathsf{nf}(#1)} \newcommand*{\nil} {\mathsf{Nil}} \newcommand*{\nulltype}{\varepsilon} \newcommand*{\n} {\mathbf{n}} \newcommand*{\ob} {\ensuremath{\rightarrow}} \newcommand*{\odd} {\mathsf{Odd}} \newcommand*{\one} {\mathsf{1}} \newcommand*{\orcE} {\lorc^{-}} \newcommand*{\orcIone} {\lorc^{+}_1} \newcommand*{\orcIzero}{\lorc^{+}_0} \newcommand*{\orcI} {{\lorc}\mathsf{I}} \newcommand*{\orclE} {{\lorcl}^{-}} \newcommand*{\orclIone}{{\lorcl}^{+}_1} \newcommand*{\orclIzero}{{\lorcl}^{+}_0} \newcommand*{\pair}[2]{\langle #1,#2\rangle} \newcommand*{\passification}{\mathrm{Passification}} \newcommand*{\passto}{\leadsto} \newcommand*{\pass}[1]{\tilde{#1}} \newcommand*{\pick}{\mathsf{pick}} \newcommand*{\polylen}[1]{\vartheta(#1)} \newcommand*{\preds}{\mathsf{P}} \newcommand*{\pred}{\mathsf{P}} \newcommand*{\p}{\mathsf{P}} \newcommand*{\quadsplittyped}[6] {{#1}\mid{#2}\mid{#3}\mid{#4} \vdash {#5}\colon {#6}} \newcommand*{\ran}{\mathsf{ran}} \newcommand*{\rec} {\C{R}} \newcommand*{\redlplus}{\leftarrow^+} \newcommand*{\redlrstar}{\overset{*}{\leftrightarrow}} \newcommand*{\redlr}{\leftrightarrow} \newcommand*{\redlstar}{\leftarrow^*} \newcommand*{\redl}{\leftarrow} 
\newcommand*{\redpar}{\rightarrow_{p}} \newcommand*{\redplus}{\rightarrow^+} \newcommand*{\redstarw}{\rightarrow^*_{\mathsf{w}}} \newcommand*{\redstar}{\rightarrow^*} \newcommand*{\redtak}{\rightarrow_{T}} \newcommand*{\red}{\rightarrow} \newcommand*{\rew} {\mathsf{rew}} \newcommand*{\rf}[3]{\mathsf{rf}(#1;#2;#3)} \newcommand*{\select} {\mathsf{sel}} \newcommand*{\seqarrow}{\Rightarrow} \newcommand*{\set}[2] {\{\,#1\mid#2\,\}} \newcommand*{\size}[1]{|#1|} %size of a prooftree or formula \newcommand*{\sn}{\mathsf{sn}} \newcommand*{\sort}{\mathsf{sort}} \newcommand*{\splittyped}[4]{{#1}\mid{#2} \vdash {#3}\colon {#4}} \newcommand*{\substnarrow}[3]{#1[#2{:=}#3]} \newcommand*{\subst}[3]{#1[#2 {:=} #3]} \newcommand*{\suc} {\mathsf{S}} \newcommand*{\s}{\mathsf{S}} \newcommand*{\tcons} {\mathsf{Tcons}} \newcommand*{\termBoolElim}{\typeBool^-} \newcommand*{\termContextA}{\Gamma} \newcommand*{\termProdElimLeft}{\mathsf{fst}} \newcommand*{\termProdElimRight}{\mathsf{snd}} \newcommand*{\termProdIntro}{\typeProd^+} \newcommand*{\termSumElim}{\typeSum^-} \newcommand*{\termSumIntroLeft}{\mathsf{inl}} \newcommand*{\termSumIntroRight}{\mathsf{inr}} \newcommand*{\termTensorElim}{\typeTensor^-} \newcommand*{\termTensorIntro}{\typeTensor^+} \newcommand*{\termUnit}{\mathsf{Dummy}} \newcommand*{\theoremname}{testing} \newcommand*{\tlist} {\mathsf{tlist}} \newcommand*{\tree} {\mathsf{tree}} \newcommand*{\trisplittyped}[5] {{#1}\mid{#2}\mid{#3} \vdash {#4}\colon {#5}} \newcommand*{\true} {\mathsf{t\kern-0.14em t}} \newcommand*{\truth} {\top} \newcommand*{\tval}[1]{v(#1)} \newcommand*{\typeBin}{\mathbf{Bin}} \newcommand*{\typeBoolInd}{\typeBool\mathrm{-Ind}} \newcommand*{\typeBool}{\mathbf{B}} \newcommand*{\typeD}{\diamond} \newcommand*{\typeLinTo}{\multimap} \newcommand*{\typeLrhoInd}{\typeL{\rho}\mathrm{-Ind}} \newcommand*{\typeLrhoStrongInd}{\typeL{\rho}\mathrm{-Ind}^+} \newcommand*{\typeLtauInd}{\typeL{\tau}\mathrm{-Ind}} \newcommand*{\typeLtauStrongInd}{\typeL{\tau}\mathrm{-Ind}^+} \newcommand*{\typeL}[1]{\mathbf{L}(#1)} \newcommand*{\typeNInd}{\typeN\mathrm{-Ind}} \newcommand*{\typeNStrongInd}{\typeN\mathrm{-Ind}^+} \newcommand*{\typeN}{\mathbf{N}} \newcommand*{\typeProd}{\times} \newcommand*{\typeSum}{+} \newcommand*{\typeTensorInd}{\typeTensor\mathrm{-Ind}} \newcommand*{\typeTensor}{\otimes} \newcommand*{\typeTo}{\to} \newcommand*{\typeUnit}{\mathbf{U}} \newcommand*{\typeW}{\mathbf{W}} \newcommand*{\typed}[3]{{#1}\vdash{#2} \colon {#3}} \newcommand*{\types}{\mathsf{T}} \newcommand*{\typ}{\C{T}_{\to}} \newcommand*{\unifalg}[1]{\Longrightarrow_{#1}} \newcommand*{\unifprefixalg}[1]{\longrightarrow_{#1}} \newcommand*{\unif}[3]{\mathsf{unif}(#1, #2 = #3)} \newcommand*{\unit} {\mathsf{unit}} \newcommand*{\variable}{\mathrm{Variable}} \newcommand*{\vars}{\mathsf{vars}} \newcommand*{\vdashc}{\vdash_{\mathrm{c}}} % derivability in classical logic \newcommand*{\vdashi}{\vdash_{\mathrm{i}}} % derivability in intuitionistic logic \newcommand*{\vdashm}{\vdash_{\mathrm{m}}} % derivability in minimal logic \newcommand*{\weakening}{\mathrm{Weakening}} \newcommand*{\weight}[1]{w(#1)}% weight of a type \newcommand*{\ws}{\mathit{w}\!\mathit{s}} \newcommand*{\ypair} {\mathsf{ypair}} \newcommand*{\yplus} {\mathsf{yplus}} \newcommand*{\ytensor}{\mathsf{ytensor}} \newenvironment{enumeratei} {\begin{enumerate}[\upshape (i)]} {\end{enumerate}} % produces (i), (ii), etc, Cross-reference with \eqref \newenvironment{enumeratea} {\begin{enumerate}[\upshape (a)]} {\end{enumerate}} % produces (a), (b), etc, Cross-reference with \eqref 
\endinput % % Font commands % \newcommand{\B}{\boldsymbol} % \newcommand{\C}[1]{\mathcal{#1}} % \newcommand{\D}[1]{\mathbb{#1}} % \newcommand{\F}[1]{\mathfrak{#1}} % % Logic % \newcommand{\ex}{\exists} %constructive existence % \newcommand{\exca}{\exists^{\mathrm{ca}}} %classical ex with arith falsum % \newcommand{\excl}{\exists^{\mathrm{cl}}} %classical ex with logic falsum % \newcommand{\falsum}{\bot} % \newcommand{\FV}{\mathrm{FV}} % \newcommand{\ltensor}{\otimes} % \newcommand{\truth}{\top} % \newcommand{\bigland}{\mathop{\hbox{$\bigwedge$\kern-.6em$\bigwedge$}}} % \newcommand{\biglor}{\mathop{\hbox{$\bigvee$\kern-.6em$\bigvee$}}} % % Types % \newcommand{\unit}{\mathsf{unit}} % \newcommand{\boole}{\mathsf{boole}} % \newcommand{\btree}{\mathsf{btree}} % \newcommand{\nat}{\mathsf{nat}} % \newcommand{\lst}{\mathsf{list}} % \newcommand{\tlist}{\mathsf{tlist}} % \newcommand{\tree}{\mathsf{tree}} % \newcommand{\Types}{\mathsf{Types}} % \newcommand{\ypair}{\mathsf{ypair}} % \newcommand{\yplus}{\mathsf{yplus}} % \newcommand{\ytensor}{\mathsf{ytensor}} % % SFAs % \newcommand{\leaf}{\mathsf{Leaf}} % \newcommand{\branch}{\mathsf{Branch}} % \newcommand{\tcons}{\mathsf{Tcons}} % \newcommand{\empt}{\mathsf{Empty}} % \newcommand{\constr}{\mathsf{constr}} % \newcommand{\constrtypes}{\mathsf{KT}} % \newcommand{\rec}{\C{R}} % \newcommand{\ST}{\delta} % \newcommand{\pair}[2]{\langle #1,#2\rangle} % \newcommand{\inj}{\mathsf{in}} % \newcommand{\bs}{\mathit{bs}} % \newcommand{\nil}{\mathsf{nil}} % \newcommand{\cons}{\mathsf{cons}} % \newcommand{\true}{\mathsf{t\kern-0.14em t}} % \newcommand{\false}{\mathsf{f\kern-0.14em f}} % \newcommand{\bottom}{\mathsf{b\kern-0.14em b}} % \newcommand{\suc}{\mathsf{S}} % \newcommand{\termUnit}{\varepsilon} % \newcommand{\termBoolElim}{\typeBool^-}% % \newcommand{\termTensorIntro}{\typeTensor^+} % \newcommand{\termTensorElim}{\typeTensor^-} % \newcommand{\termProdIntro}{\typeProd^+} % \newcommand{\termProdElimLeft}{\mathsf{fst}} % \newcommand{\termProdElimRight}{\mathsf{snd}} % \newcommand{\termSumIntroLeft}{\mathsf{inl}} % \newcommand{\termSumIntroRight}{\mathsf{inr}} % \newcommand{\termSumElim}{\typeSum^-} % \newcommand{\Eq}{\approx} % \newcommand{\eqrel}[2]{{=}(#1,#2)} % % Rewriting % \newcommand{\cnv}{\mapsto} % % Predicates % \newcommand{\atom}{\mathsf{atom}} % \newcommand{\rew}{\mathsf{rew}} % \newcommand{\select}{\mathsf{sel}} % \newcommand{\Total}{\mathsf{Total}} % % Miscellaneous % \newcommand{\Comp}{\textsc{Comp}} % \newcommand{\comp}{\mathsf{comp}} % \newcommand{\et}[1]{\mathsf{et}(#1)} % \newcommand{\fix}{\mathsf{fix}} % \newcommand{\ind}{\mathsf{Ind}} % \newcommand{\domind}{\mathsf{Dom-Ind}} % \newcommand{\indexentry}[1]{\emph{#1}\index{#1}} % \newcommand{\inquotes}[1]{``#1''} % \newcommand{\NumVars}{\mathsf{NumVars}} % \newcommand{\seqarrow}{\Rightarrow} % \newcommand{\set}[2]{\{\,#1\mid#2\,\}} % \newcommand{\subst}[3]{#1[#2 {:=} #3]} % \newcommand{\TDegs}{\mathsf{TDegs}} % % Font commands % \newcommand*{\B}{\boldsymbol} % % Bold math symbol, use as \B{a} % \newcommand*{\C}[1]{\mathcal{#1}} % % Euler Script - only caps, use as \C{A} % \newcommand*{\D}[1]{\mathbb{#1}} % % Doubles - only caps, use as \D{A} % \newcommand*{\F}[1]{\mathfrak{#1}} % % Fraktur, use as \F{a} % %% Logic % \newcommand*{\ex}{\exists} %constructive existence % \newcommand*{\exca}{\exists^{\mathrm{ca}}} %classical ex with arith falsum % \newcommand*{\excl}{\exists^{\mathrm{cl}}} %classical ex with logic falsum % \newcommand*{\boole}{\mathsf{boole}} % \newcommand*{\inquotes}[1]{``#1''} % 
\newcommand*{\set}[2]{\{\,#1\mid#2\,\}} % \newcommand*{\subst}[3]{#1[#2 {:=} #3]} % \newcommand*{\listappend}{\mathbin{{:}{+}{:}}} % \newcommand*{\listrev}{\mathsf{Rev}} % \newcommand*{\mi}{\textsc{MINLOG}} % \newcommand*{\nil}{\mathsf{Nil}} % \newcommand*{\ob}{\ensuremath{\rightarrow}} % \newcommand*{\bv}{\begin{verbatim}} % \newcommand*{\mdir}{\~{}/minlog} minlog-4.0.99.20080304/doc/acknow.tex0000644000175000017500000000237010746604522016052 0ustar barralbarral% $Id: acknow.tex,v 1.4 2008/01/25 13:30:19 logik Exp $ \subsection*{Acknowledgements} The \textsc{Minlog} system has been under development since around 1990. My sincere thanks go to the many contributors: % Klaus Aehlig (many contributions, in particular efficiency related issues), Holger Benl (Dijkstra algorithm, inductive data types), Ulrich Berger (very many contributions), Michael Bopp (program development by proof transformation), Wilfried Buchholz (translation of classical proof into intuitionistic ones), Laura Cro\-silla (tutorial), Matthias Eberl (normalization by evaluation), Dan Hernest (functional interpretation), Felix Joachimski (many contributions, in particular translation of classical proofs into intuitionistic ones, producing Tex output, documentation), Ralph Matthes (documentation), Karl-Heinz Niggl (program development by proof transformation), Jaco van de Pol (experiments concerning monotone functionals), Martin Ruckert (many contributions, in particular the MPC tool), Robert St\"ark (alpha equivalence), Monika Seisenberger (many contributions, including inductive definitions and translation of classical proofs into intuitionistic ones), Klaus Weich (proof search, the Fibonacci numbers example), Wolfgang Zuber (documentation). minlog-4.0.99.20080304/doc/notation.sty0000644000175000017500000003631210746604522016445 0ustar barralbarral% $Id: notation.sty,v 1.7 2008/01/25 13:30:18 logik Exp $ % \NeedsTexFormat{LaTeX2e}[1999/06/01]% does not yet work \ProvidesPackage{notation}[2001/04/21 General Commands] \RequirePackage{amsmath} \RequirePackage{amssymb} \RequirePackage{latexsym} \RequirePackage{verbatim} \RequirePackage{enumerate} %allows \begin{comment} text \end{comment} % Font commands \newcommand{\B}{\boldsymbol} % Bold math symbol, use as \B{a} \newcommand{\C}[1]{\mathcal{#1}} % Euler Script - only caps, use as \C{A} \newcommand{\D}[1]{\mathbb{#1}} % Doubles - only caps, use as \D{A} \newcommand{\F}[1]{\mathfrak{#1}} % Fraktur, use as \F{a} \newcommand{\betabf}{\beta\hspace{-1.35ex}\beta\hspace{-1.35ex}\beta} % bold beta \newcommand{\lambf}{\lambda\hspace{-1.28ex}\lambda\hspace{-1.28ex}\lambda} % bold lambda \newcommand{\etabf}{\eta\hspace{-1.28ex}\eta\hspace{-1.28ex}\eta} % bold eta % Logic: formulas \newcommand{\exc}{\exists} %constructive version, written without * \newcommand{\lorc}{\lor} %constructive version, written without * \newcommand{\allnc}{\forall^{\mathsf{nc}}} %constructive, with no comp. content \newcommand{\exnc}{\exists^{\mathsf{nc}}} %constructive, with no comp. 
content \newcommand{\exca}{\exists^{\mathsf{ca}}} %classical, with arthmetic falsum \newcommand{\excl}{\exists^{\mathsf{cl}}} %classical, with logical falsum \newcommand{\lorcl}{\lor^{\mathsf{cl}}} \newcommand{\falsum}{\bot} \newcommand{\FV}{\mathsf{FV}} \newcommand{\ltensor}{\otimes} \newcommand{\truth}{\top} \newcommand{\bigland}{\mathop{\hbox{$\bigwedge$\kern-.6em$\bigwedge$}}} \newcommand{\biglor}{\mathop{\hbox{$\bigvee$\kern-.6em$\bigvee$}}} \newcommand{\linto}{\multimap} \newcommand{\passto}{\leadsto} % \newcommand{\passto}{\rightsquigarrow} \newcommand{\com}[1]{\bar{#1}} \newcommand{\pass}[1]{\tilde{#1}} % Logic: rules \newcommand{\andI}{{\land}^{+}} \newcommand{\andEzero}{{\land}^{-}_0} \newcommand{\andEone}{{\land}^{-}_1} \newcommand{\orcE}{\lorc^{-}} \newcommand{\orcIzero}{\lorc^{+}_0} \newcommand{\orcIone}{\lorc^{+}_1} \newcommand{\orclE}{{\lorcl}^{-}} \newcommand{\orclIzero}{{\lorcl}^{+}_0} \newcommand{\orclIone}{{\lorcl}^{+}_1} \newcommand{\allI}{{\forall}^{+}} \newcommand{\allE}{{\forall}^{-}} \newcommand{\allncI}{{\allnc}^{+}} \newcommand{\allncE}{{\allnc}^{-}} \newcommand{\excI}{{\exc}^{+}} \newcommand{\excE}{{\exc}^{-}} \newcommand{\exncI}{({\exnc})^{+}} \newcommand{\exncE}{({\exnc})^{-}} \newcommand{\exclI}{{\excl}^{+}} \newcommand{\exclE}{{\excl}^{-}} \newcommand{\impI}{{\to}^{+}} \newcommand{\impE}{{\to}^{-}} \newcommand{\ltensorI}{\ltensor^{+}} \newcommand{\ltensorE}{\ltensor^{-}} \newcommand{\falsumE}{\falsum^{-}} % Logic: proof terms \newcommand{\elim}[3]{(#1, #2.#3)} \newcommand{\orcI}{{\lorc}\mathsf{I}} \newcommand{\lamnc}{\lambda^{\mathsf{nc}}} % Languages \newcommand{\Fun}{\mathsf{Fun}} \newcommand{\Rel}{\mathsf{Rel}} \newcommand{\Ter}{\mathsf{Ter}} \newcommand{\vars}{\mathsf{vars}} % Lambda calculus \newcommand{\cnv}{\mapsto} \newcommand{\red}{\rightarrow} \newcommand{\redl}{\leftarrow} \newcommand{\redlstar}{\leftarrow^*} \newcommand{\redlplus}{\leftarrow^+} \newcommand{\redlr}{\leftrightarrow} \newcommand{\redlrstar}{\overset{*}{\leftrightarrow}} \newcommand{\redstar}{\rightarrow^*} \newcommand{\redplus}{\rightarrow^+} % \newcommand{\redstar}{\overset{*}{\rightarrow}} % \newcommand{\redplus}{\overset{+}{\rightarrow}} \newcommand{\sn}{\mathsf{sn}} \newcommand{\SC}{\mathsf{SC}} \newcommand{\SN}{\mathsf{SN}} \newcommand{\Var}{\mathsf{Var}} % Parallel reduction \newcommand{\appmon}{\mathsf{appmon}} \newcommand{\betapar}{\beta\mathsf{par}} \newcommand{\etapar}{\eta\mathsf{par}} \newcommand{\id}{\mathsf{id}} \newcommand{\lambdamon}{\lambda\mathsf{mon}} \newcommand{\redpar}{\rightarrow_{p}} % Takahashi's Parallel reduction \newcommand{\betatak}{\beta\mathsf{Tak}} \newcommand{\redtak}{\rightarrow_{T}} % Combinatory logic \newcommand{\CL}{\textbf{CL}} \newcommand{\cnvw}{\mapsto_{\mathsf{w}}} \newcommand{\redstarw}{\rightarrow^*_{\mathsf{w}}} % Sets \newcommand{\es}{\varnothing}% the empty set \newcommand{\set}[2]{\{\,#1\mid#2\,\}} % Lists \newcommand{\ls}[2]{[\,#1\mid #2\,]} % Derivation symbols \newcommand{\vdashm}{\vdash_{\mathrm{m}}}% derivability in minimal logic \newcommand{\vdashi}{\vdash_{\mathrm{i}}}% derivability in intuitionistic logic \newcommand{\vdashc}{\vdash_{\mathrm{c}}}% derivability in classical logic % Systems \newcommand{\MPC}{\textbf{Mp}}% minimal propositional logic \newcommand{\IPC}{\textbf{Ip}}% intuitionistic propositional logic \newcommand{\CPC}{\textbf{Cp}}% classical propositional logic \newcommand{\MQC}{\textbf{M}}% minimal logic \newcommand{\IQC}{\textbf{I}}% intuitionistic logic \newcommand{\CQC}{\textbf{C}}% classical logic 
\newcommand{\IQCE}{\textbf{Ie}}% int.logic with E-predicate \newcommand{\lamto}{\lambf_{\to}} \newcommand{\lametato}{\lambf\etabf_{\to}} \newcommand{\HA}{\textbf{HA}}% Heyting arithmetic \newcommand{\LHA}{\textbf{LHA}}% Linear Heyting arithmetic \newcommand{\LT}{\textbf{LT}}% Linear Goedel T \newcommand{\T}{\textbf{T}}% Goedel T % Combinations for designating several systems at once \newcommand{\mic}{\mathrm{[}\textbf{mic}\mathrm{]}} \newcommand{\mi}{\mathrm{[}\textbf{mi}\mathrm{]}} \newcommand{\ic}{\mathrm{[}\textbf{ic}\mathrm{]}} % de Bruijn indices \newcommand{\Abs}{\mathsf{Abs}} \newcommand{\Term}{\mathsf{Term}} \newcommand{\TERM}{textsc{Term}} \newcommand{\abs}{\mathsf{abs}} \newcommand{\app}{\mathsf{app}} % Indexing \renewcommand{\glossary}{\index} \newcommand{\indexentry}[1]{\emph{#1}\index{#1}} \newcommand{\mylabel}[1]{\label{#1}} % \newcommand{\mylabel}[1]{\label{#1}\marginpar{\tiny #1}} % Pattern unification and search \newcommand{\unif}[3]{\mathsf{unif}(#1, #2 = #3)} \newcommand{\unifalg}[1]{\Longrightarrow_{#1}} \newcommand{\unifprefixalg}[1]{\longrightarrow_{#1}} % Sequents \newcommand{\seqarrow}{\Rightarrow} % SFAs \newcommand{\leaf}{\mathsf{Leaf}} \newcommand{\tree}{\mathsf{tree}} \newcommand{\branch}{\mathsf{Branch}} \newcommand{\tlist}{\mathsf{tlist}} \newcommand{\tcons}{\mathsf{Tcons}} \newcommand{\constr}{\mathsf{constr}} \newcommand{\rec}{\C{R}} % \newcommand{\rec}{\mathsf{R}} \newcommand{\Cases}{\C{C}} % \newcommand{\Cases}{\mathsf{Cases}} \newcommand{\ind}{\mathsf{Ind}} % \newcommand{\closure}{\mathsf{Clos}} % \newcommand{\mini}{\mathsf{Mini}} \newcommand{\CasesAxiom}{\mathsf{Cases}} \newcommand{\ST}{\delta} \newcommand{\pair}[2]{\langle #1,#2\rangle} \newcommand{\inj}{\mathsf{in}} \newcommand{\bs}{\mathit{b}\!\mathit{s}} % \newcommand{\bs}{\mathit{bs}} \newcommand{\nil}{\mathsf{Nil}} \newcommand{\cons}{\mathsf{Cons}} \newcommand{\true}{\mathsf{t\kern-0.14em t}} \newcommand{\false}{\mathsf{f\kern-0.14em f}} \newcommand{\bottom}{\mathsf{b\kern-0.14em b}} \newcommand{\suc}{\mathsf{S}} \newcommand{\termUnit}{\mathsf{Dummy}} % \newcommand{\termUnit}{\varepsilon} \newcommand{\emp}{\mathsf{Empty}} \newcommand{\termBoolElim}{\typeBool^-}% \newcommand{\termTensorIntro}{\typeTensor^+} \newcommand{\termTensorElim}{\typeTensor^-} \newcommand{\termProdIntro}{\typeProd^+} \newcommand{\termProdElimLeft}{\mathsf{Fst}} \newcommand{\termProdElimRight}{\mathsf{Snd}} \newcommand{\termSumIntroLeft}{\mathsf{Inl}} \newcommand{\termSumIntroRight}{\mathsf{Inr}} \newcommand{\termSumElim}{\typeSum^-} \newcommand{\eqrel}[2]{{=}(#1,#2)} % Types \newcommand{\types}{\mathsf{T}} \newcommand{\constrtypes}{\mathsf{KT}} \newcommand{\bang}{\square} \newcommand{\typeD}{\diamond} % \newcommand{\typeN}{\mathsf{nat}} \newcommand{\typeN}{\mathbf{N}} \newcommand{\typeW}{\mathbf{W}} \newcommand{\typeBin}{\mathbf{Bin}} % \newcommand{\typeBool}{\mathsf{boole}} \newcommand{\typeBool}{\mathbf{B}} % \newcommand{\typeUnit}{\mathsf{unit}} \newcommand{\typeUnit}{\mathbf{U}} \newcommand{\typeTo}{\to} \newcommand{\typeLinTo}{\multimap} % \newcommand{\typeLinTo}{\to} \newcommand{\typeTensor}{\otimes} \newcommand{\typeProd}{\times} \newcommand{\typeSum}{+} % \newcommand{\typeL}[1]{\mathsf{list}(#1)} \newcommand{\typeL}[1]{\mathbf{L}(#1)} % Arithmetic \newcommand{\0}{\mathsf{0}} \newcommand{\one}{\mathsf{1}} \newcommand{\n}{\mathbf{n}} \newcommand{\s}{\mathsf{S}} \newcommand{\p}{\mathsf{P}} \newcommand{\caseconst}{\mathsf{case}} \newcommand{\bincaseconst}{\B{d}} \newcommand{\caseBooleConst}{\mathsf{if}} 
\newcommand{\caseListConst}{\B{c}} \newcommand{\dotminus}{\mathbin{{{-\mkern -9.5mu% \mathchoice{\raise 2pt\hbox{$\cdot$}\mkern6mu}% {\raise 2pt\hbox{$\cdot$}\mkern6mu}% {\raise 1.5pt\hbox{$\scriptstyle\cdot$}\mkern4mu}% {\raise 1pt\hbox{$\scriptscriptstyle\cdot$}\mkern4mu}}}}} % Contexts \newcommand{\termContextA}{\Gamma} \newcommand{\HAcontextA}{\Pi} \newcommand{\HAcontextB}{\Gamma} \newcommand{\emptycontext}{\cdot} % Typing judgements \newcommand{\typed}[3]{{#1}\vdash{#2} \colon {#3}} % \newcommand{\typed}[3]{{#1}\vdash{#2}^{#3}} \newcommand{\splittyped}[4]{{#1}\mid{#2} \vdash {#3}\colon {#4}} \newcommand{\trisplittyped}[5]{{#1}\mid{#2}\mid{#3} \vdash {#4}\colon {#5}} \newcommand{\quadsplittyped}[6]{{#1}\mid{#2}\mid{#3}\mid{#4} \vdash {#5}\colon {#6}} \newcommand{\HAtyped}[4]{{#1}\mid{#2} \vdash {#3}\colon {#4}} % Axioms and rules \newcommand{\variable}{\mathrm{Variable}} \newcommand{\constant}{\mathrm{Constant}} \newcommand{\axiom}{\mathrm{Axiom}} \newcommand{\assumption}{\mathrm{Assumption}} \newcommand{\typeTensorInd}{\typeTensor\mathrm{-Ind}} \newcommand{\typeBoolInd}{\typeBool\mathrm{-Ind}} \newcommand{\typeLrhoInd}{\typeL{\rho}\mathrm{-Ind}} \newcommand{\typeLtauInd}{\typeL{\tau}\mathrm{-Ind}} \newcommand{\typeLrhoStrongInd}{\typeL{\rho}\mathrm{-Ind}^+} \newcommand{\typeLtauStrongInd}{\typeL{\tau}\mathrm{-Ind}^+} \newcommand{\typeNInd}{\typeN\mathrm{-Ind}} % \newcommand{\typeNStrongInd}{\typeN\mathrm{-PRInd}} \newcommand{\typeNStrongInd}{\typeN\mathrm{-Ind}^+} \newcommand{\contraction}{\mathrm{Contraction}} \newcommand{\weakening}{\mathrm{Weakening}} \newcommand{\passification}{\mathrm{Passification}} \newcommand{\activation}{\mathrm{Activation}} \newcommand{\completion}{\mathrm{Completion}} \newcommand{\Neg}{\mathrm{Neg}} \newcommand\VarBed{\hbox{\mathsf{VarCond}}} % Derivation terms \newcommand{\HAitA}[1]{\left\{{#1}\right\}} % Functions and relations for examples \newcommand{\ins}{\mathsf{ins}} \newcommand{\sort}{\mathsf{sort}} \newcommand{\Add}{\mathsf{Add}} \newcommand{\Dup}{\mathsf{Dup}} \newcommand{\Sort}{\mathsf{Sort}} \newcommand{\Ins}{\mathsf{Ins}} % Extraction \newcommand{\extrTy}[1]{\tau(#1)} \newcommand{\extrTer}[1]{\lbrack\!\lbrack#1\rbrack\!\rbrack} \newcommand{\Uvar}[1]{x_{#1}} %object var assigned to assumption var \newcommand{\comUvar}[1]{\com{x}_{#1}} %object var assigned to assumption var \newcommand{\nulltype}{\varepsilon} % Warshall algorithm \newcommand{\emptyls}{\epsilon} \newcommand{\con}[2]{#1 :: #2} \newcommand{\consnil}[1]{#1{:}} \newcommand{\elem}[2]{#1 \in #2} \newcommand{\Rf}[1]{\mathsf{Rf}(#1)} \newcommand{\concat}[2]{#1 \mid #2} % Cleaning \newcommand{\iso}{\varphi} %\iso : rho -> clean(rho) Isomorphism \newcommand{\inviso}{\tilde{\varphi}} %and its inverse \newcommand{\collapse}[1]{\mathrm{c}_{#1}} \newcommand{\extend}[1]{\mathrm{e}_{#1}} % Inductively generated predicates \newcommand{\clauses}{\mathsf{KF}} \newcommand{\formulas}{\mathsf{F}} \newcommand{\preds}{\mathsf{P}} \newcommand{\Acc}{\mathsf{Acc}} \newcommand{\Barid}{\mathsf{Bar}} \newcommand{\Good}{\mathsf{Good}} \newcommand{\Even}{\mathsf{Even}} \newcommand{\Ev}{\mathsf{Ev}} \newcommand{\Od}{\mathsf{Od}} \newcommand{\Tree}{\mathsf{Tree}} \newcommand{\Tlist}{\mathsf{Tlist}} \newcommand{\TrCl}{\mathsf{TrCl}} \newcommand{\algAcc}{\mathsf{algAcc}} % Program extraction from classical logic \newcommand{\Truthax}{\ax_{\mathsf{true}}} \newcommand{\ax}{\mathsf{Ax}} \newcommand{\derint}{\vdash_{\mathsf{int}}} \newcommand{\Zth}{Z} \newcommand{\Efqdummy}{\mathsf{dummy}} 
\newcommand{\mrAAA}{\;\mathbf{r}_{\C{A}}\;} \newcommand{\Fib}{G} \newcommand{\ran}{\mathsf{ran}} \newcommand{\Prog}{\mathsf{Prog}} \newcommand{\cvind}{\mathrm{cvind}} \newcommand{\ifthenelse}[3]{[\textbf{if}\; #1\; \textbf{then}\; #2\; \textbf{else}\; #3]} % \newcommand{\ifthenelse}[3]{\textbf{if}\ #1\ \textbf{then}\ #2\ \textbf{else}\ #3} % Miscellaneous \newcommand{\acronym}[1]{textsc{#1}} \newcommand{\atom}{\mathsf{atom}} \newcommand{\App}{\mathsf{App}} \newcommand{\BNFdef}{\mathtt{\; ::= \;}} \newcommand{\BNFor}{\mid} \newcommand{\booleneg}{\mathsf{Neg}} \newcommand{\card}[1]{\#(#1)} % \newcommand{\card}[1]{|#1|} \newcommand{\clean}[1]{{#1}^{\mathit{c}}} % \newcommand{\clean}[1]{\mathsf{clean}(#1)} \newcommand{\Comp}{\textsc{Comp}} \newcommand{\comp}{\mathsf{comp}} \newcommand{\conItA}[1]{\left\{{#1}\right\}} \newcommand{\defiff}{\;\;:\Longleftrightarrow\;\;} \newcommand{\dep}[1]{\mathsf{dp}(#1)}% depth of a formula or prooftree % \newcommand{\dep}[1]{|#1|}% depth of a formula or prooftree \newcommand{\dom}{\mathsf{dom}} \newcommand{\domain}[1]{\mathsf{D_{#1}}} % \newcommand{\edom}{\mathsf{edom}} \newcommand{\eps}{\varepsilon} \newcommand{\EqCompat}{\mathsf{Eq}\hbox{-}\mathsf{Compat}} \newcommand{\Efq}{\mathsf{Efq}} \newcommand{\EfqLog}{\textsf{Efq-Log}} \newcommand{\Eig}{\mathsf{Eig}} \newcommand{\Elim}{\mathsf{Elim}} % \newcommand{\even}{\mathsf{Even}} \newcommand{\Evenodd}{\mathsf{EvenOdd}} \newcommand{\Eq}{\mathsf{Eq}} \newcommand{\EqI}{\mathsf{Eq}^{+}} \newcommand{\EqE}{\mathsf{Eq}^{-}} \newcommand{\FA}{\mathsf{FA}} \newcommand{\fini}{{\vbox{\hrule\hbox{% \vrule height1.3ex\hskip0.8ex\vrule}\hrule}}} \newcommand{\fix}{\mathsf{fix}} \newcommand{\hfilll}{\hspace*{0cm plus 1filll}} \newcommand{\If}{\mathsf{If}} \newcommand{\imp}{\Rightarrow} \newcommand{\Intro}{\mathsf{Intro}} \newcommand{\inquotes}[1]{``#1''} % \newcommand{\inquotes}[1]{"`#1"'} %german quotes \newcommand{\iterate}{\mathit{I}} \newcommand{\len}[1]{\mathsf{len}(#1)} \newcommand{\lev}[1]{\mathsf{lev}(#1)} \newcommand{\leafsize}[1]{\mathsf{ls}(#1)} %leaf size of a formula or prooftree \newcommand{\listappend}{\mathbin{{:}{+}{:}}} \newcommand{\listrev}{\mathsf{Rev}} \newcommand{\mr}{\,\mathbf{r}\,} \newcommand{\mrind}{\mathbf{r}} % \newcommand{\mr}{\,\underline{\mathbf{r}}\,} % \newcommand{\mr}{\,\underline{\mathbf{mr}}\,} \newcommand{\next}{\mathsf{nxt}} \newcommand{\nf}[1]{\mathsf{nf}(#1)} \newcommand{\odd}{\mathsf{Odd}} \newcommand{\pick}{\mathsf{pick}} \newcommand{\polylen}[1]{\vartheta(#1)} \newcommand{\pred}{\mathsf{P}} \newcommand{\PV}{\mathsf{PV}} \renewcommand{\qedsymbol}{{\ \fini\par}} % \renewcommand{\qedsymbol}{{\ \vbox{\hrule\hbox{% % \vrule height1.3ex\hskip0.8ex\vrule}\hrule}}\par} \newcommand{\rew}{\mathsf{rew}} \newcommand{\rf}[3]{\mathsf{rf}(#1;#2;#3)} \newcommand{\select}{\mathsf{sel}} \newcommand{\Set}{\mathbb{S}} \newcommand{\size}[1]{|#1|} %size of a prooftree or formula % \newcommand{\size}[1]{\mathsf{s}(#1)} %size of a prooftree or formula \newcommand{\Stab}{\mathsf{Stab}} \newcommand{\subst}[3]{#1[#2:= #3]} \newcommand{\substnarrow}[3]{#1[#2{:=}#3]} \newcommand{\Tikk}{\Diamond} \newcommand{\Total}{\mathsf{Total}} \newcommand{\tval}[1]{v(#1)} \newcommand{\typ}{\C{T}_{\to}} \newcommand{\weight}[1]{w(#1)}% weight of a type \newcommand{\ws}{\mathit{w}\!\mathit{s}} \newenvironment{enumeratei}{\begin{enumerate}[\upshape (i)]} {\end{enumerate}} % produces (i), (ii), etc, Cross-reference with \eqref \newenvironment{enumeratea}{\begin{enumerate}[\upshape (a)]} {\end{enumerate}} % produces (a), (b), etc, 
Cross-reference with \eqref \endinput %%% Local Variables: %%% mode: latex-math %%% End: minlog-4.0.99.20080304/doc/tutor.tex0000644000175000017500000017046610746604523015752 0ustar barralbarral% $Id: tutor.tex,v 1.19 2008/01/25 13:30:18 logik Exp $ \documentclass[12pt]{amsart} \usepackage{amssymb,enumerate} % for pdftex \usepackage[backref]{hyperref} \evensidemargin 5mm \oddsidemargin 5mm \textwidth 150mm \textheight 225mm \allowdisplaybreaks[4] \parskip 0pt plus 1pt \def\qedsymbol{{\ \vbox{\hrule\hbox{% \vrule height1.3ex\hskip0.8ex\vrule}\hrule}}\par} % minlog.mac contains the macros for all tex-files of the minlog % documentation in order to avoid code duplication and different % notations. \input{minlog.mac} \author{L.~Crosilla} \thanks{I would like to thank Professor Schwichtenberg for suggesting that I write this tutorial and for useful comments as well as for providing the example on search. Monika Seisenberger commented on an early version of the tutorial proposing improvements and provided some more examples. In writing this tutorial I took inspiration from Martin Ruckert's Tutorial for an earlier version of \mi.} \title{A Tutorial for \mi, Version 4.0} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} \maketitle \section{Introduction} This is a tutorial for the interactive proof system \mi, version 4.0, developed by Helmut Schwichtenberg and members of the logic group at the University of Munich (http://www.mathematik.uni-muenchen.de/$\sim$logik/welcome.html). \mi\ is implemented in \textsc{Scheme} and runs under every \textsc{Scheme} version supporting the Revised$^5$ Report on the Algorithmic Language \textsc{Scheme}. \mi's favorite dialect is Petite Chez \textsc{Scheme} from Cadence Research Systems, which is freely distributed at the Internet address www.scheme.com. The \mi\ system can be downloaded from the Internet address: http://www.minlog-system.de/ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Getting started} The purpose of this Tutorial is to give a very basic introduction to the \mi\ system by means of some simple examples. For a thorough presentation of \mi\ and the motivation behind it, the reader should consult the reference manual \cite{minlogman}. The papers listed on the \mi\ web page also provide a more detailed and advanced description of the system. In addition, the \mi\ distribution comes equipped with a directory of examples, to which the user is referred. The source code finally provides the ultimate reference. In order to use \mi, one essentially needs a shell in which to run \mi\ and also an editor in which to edit and keep a record of the commands for later sessions. In this tutorial we shall refer to \textsc{GNU Emacs}. While working with \textsc{Emacs}, the ideal would be to split the window into two parts, one with the file in which to store the commands, and the other with the \mi\ interactive session taking place. For this it is recommended to use the startup script \emph{\mdir/minlog}, which takes files as (optional) arguments. For example\\ \begin{quote} \texttt{\mdir/minlog file.scm}\\ \end{quote} opens a new \textsc{Emacs} window which is split into two parts. The upper part contains the file \emph{(Buffer file.scm)} whereas the lower part shows the \mi\ response \emph{(Buffer *minlog*)}.
If you already have an open \textsc{Emacs} window and do not want to open a new one, you can invoke \mi\ by loading the file \emph{minlog.el}: \begin{quote} \texttt{M-x load-file }\\ \texttt{\mdir/minlog.el}\\ \end{quote} REMARKS: In both cases the file \emph{init.scm} is loaded. In the description above I assumed tacitly that you are using a UNIX-like operating system and that \mi\ is installed in \mdir, where \~{} denotes as usual your home directory. % To split the window in two, one types \texttt{Ctrl-x 2}. % Then, to open a new file where to keep a record of the commands % one types \texttt{Ctrl-x Ctrl-f} and digits the name of the file, % for example \texttt{tutorial.scm}. Note that this file % should have the extension \texttt{scm}. % In the other window we can first run Petite Chez \textsc{Scheme} by typing: % \texttt{Meta-x run-petite}. % After \textsc{Scheme} has started, it will give its prompt, e.g.\ % ``\texttt{>}''. % To start \mi, one moves back to \texttt{tutorial.scm} and types % the following line (parenthesis included): % \bv % (load "minlogdir/init.scm") % \end{verbatim} % \noindent % where ``minlogdir'' has to be replaced with the directory in which \mi\ % is installed, with its relevant path. To execute a command in your file, one simply places the cursor at the end of it (after the closing parenthesis), and types \texttt{Ctrl-x Ctrl-e}. In general, \texttt{Ctrl-x Ctrl-e} will enable us to process any command we type in \texttt{tutorial.scm}, but only one at a time. To process a whole series of commands, one can highlight the region of interest and type \texttt{Ctrl-c Ctrl-r}. We should also mention at this point that to undo one step, it is enough to give the command \texttt{(undo)}, while \texttt{(undo n)} will undo the last $n$ steps. Finally, type \texttt{(exit)} to end a \textsc{Scheme} session and \texttt{Ctrl-x Ctrl-c} to exit \textsc{Emacs}. %%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Propositional logic} \subsection{A first example} We shall start from a simple example in propositional logic. Suppose we want to prove the tautology: $$ (A \ob (B \ob C)) \ob ((A \ob B) \ob (A \ob C)). $$ In the following we shall make use of the convention of association to the right of the parentheses, which is assumed by \mi. Therefore the formula above becomes: $(A \ob B \ob C) \ob (A \ob B) \ob A \ob C$. It is very important, especially in the beginning, to pay close attention to this and similar conventions in order to prevent mistakes. It is probably a good idea to err on the side of too many parentheses in the first examples. \mi\ will automatically delete the parentheses which are not needed, thereby facilitating the reading. %%%%%%%%%%%%%%%%% {\bf Making a sketch of the proof}. We first need to make an informal plan of how to prove this tautology. While making this plan we should consider the following fact. \mi\ (mainly) implements ``Goal Driven Reasoning'', also called ``Backward Chaining''. That means that we start by writing the conclusion we aim at as our goal and then, step by step, refine this goal by applying to it appropriate logical rules, until we reach the point of having no more goals to solve. In other words, \mi\ keeps a list of goals and updates it each time a logical rule is applied. A logical rule will have the effect of reducing the proof of a formula, the goal, to the proof of one or more other formulas, which will become the new goals. The proof is completed when the list of goals is empty.
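To give a first impression of how such a goal-driven session looks in practice, here is a minimal warm-up sketch for the trivial tautology $P \ob P$ (this small example is not taken from the development below; we use a fresh predicate constant $P$ so as not to interfere with the declarations introduced later, and the individual commands are explained step by step in the remainder of this section):
\bv
(add-predconst-name "P" (make-arity))
(set-goal (pf "P -> P"))
(assume 1)
(use 1)
\end{verbatim}
Here \texttt{set-goal} states $P \ob P$ as the initial goal, \texttt{(assume 1)} turns it into the goal $P$ under the assumption \texttt{1:P}, and \texttt{(use 1)} closes this goal with that assumption, after which \mi\ reports that the proof is finished. We now return to the tautology stated above.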
%In dealing with this first example we will make a plan of %the proof according to a ``backward reasoning'' %and try to go as close as possible to the formal argument. In this case the tautology we want to prove is a series of implications, hence we will have to make use of basic rules for ``deconstructing'' implications. The first move will then be to assume that the antecedent of the outmost implication is true and try to derive the consequent from it. That is, we assume $A \ob B \ob C$ and want to derive $(A \ob B) \ob A \ob C$; hence we set the latter as our new goal. Then we observe that $(A \ob B) \ob A \ob C$ is an implication and can be treated in the same way; so we now assume $A \ob B \ob C$ and $A \ob B$ and want to derive $A \ob C$. Clearly, we can make the same step once more and obtain $A \ob B \ob C$, $A \ob B$ and $A$ as our premises and try to derive $C$ from them. Now we observe that in order to prove $C$ under the assumption $A \ob B \ob C$, we simply need to prove both $A$ and $B$ under the same assumption. Obviously $A$ is proved, as it is one of our assumptions, and $B$ immediately follows from $A \ob B$ and $A$. %%%%%%%%%%%%%%%%% {\bf Writing the formula}. Once we have an idea on how to prove the formula, we can start implementing the proof in \mi. The initial step would then be to write the formula in \mi. For this purpose, we declare three predicate constants $A$, $B$ and $C$ by writing: \bv(add-predconst-name "A" "B" "C" (make-arity))\end{verbatim} The expression \texttt{(make-arity)} produces the empty arity for $A$, $B$ and $C$ (see \cite{minlogman} for a description of \texttt{(make-arity)}). \mi\ will then write: \bv ; ok, predicate constant A: (arity) added ; ok, predicate constant B: (arity) added ; ok, predicate constant C: (arity) added > \end{verbatim} Subsequently, we write the formula and give it a name, say \texttt{distr} (for distributivity of implication): \bv (define distr (pf "(A -> B -> C) -> (A -> B) -> A -> C")) \end{verbatim} This command has the effect of defining a new variable \texttt{distr} and attaching to it the \textsc{Scheme} term which is produced by the function \texttt{pf} applied to the formula we entered. In fact, the function {\texttt pf}, short for ``parse formula'', takes a string as argument and returns a \textsc{Scheme} term. This \textsc{Scheme} term is the way \mi\ stores the string, or, equivalently, it is the {\em internal form} in \mi\ of our formula, and \texttt{distr} is a name referring to it. By typing \texttt{distr}, one can see the value of this variable. %%%%%%%%%%%%%%%%% {\bf Implementing the Proof}. We now want to prove this formula with \mi. In order to do this we clearly need to set the formula as our goal. {\bf Setting the goal}. Typically, the goals in a proof will be numbered and the top goal will be denoted by the number 1, preceded by a question mark. To set \texttt{distr} as our goal, we type: \bv (set-goal distr) \end{verbatim} \mi\ will print: \bv ; ?_1: (A -> B -> C) -> (A -> B) -> A -> C > \end{verbatim} %%%%%%%%%%%%%%%%% {\bf The proof}. We have seen in the sketch of the proof that the first step for proving the tautology is to assume the antecedent of the implication and turn the consequent into our new goal. This is simply done by writing: \bv (assume 1) \end{verbatim} Here the number \texttt{1} is needed in order to identify and name the hypothesis. 
\mi\ will denote this hypothesis by \texttt{1}: \begin{verbatim} ; ok, we now have the new goal ; ?_2: (A -> B) -> A -> C from ; 1:A -> B -> C > \end{verbatim} We repeat the assume command to decompose the implication in the new goal: %\newpage \bv (assume 2)\end{verbatim} \bv ; ok, we now have the new goal ; ?_3: A -> C from ; 1:A -> B -> C ; 2:A -> B > \end{verbatim} And we decompose the goal once more: \bv (assume 3) \end{verbatim} \bv ; ok, we now have the new goal ; ?_4: C from ; 1:A -> B -> C ; 2:A -> B ; 3:A > \end{verbatim} We now need to start using our assumptions. As already mentioned, in order to prove \texttt{C} it is enough to prove both \texttt{A} and \texttt{B}, by assumption \texttt{1}. Therefore we write: \sloppy \texttt{(use 1)}. This has the effect of splitting the goal into two distinct subgoals (note how the subgoals are numbered): \bv (use 1) ; ok, ?_4 can be obtained from ; ?_6: B from ; 1:A -> B -> C ; 2:A -> B ; 3:A ; ?_5: A from ; 1:A -> B -> C ; 2:A -> B ; 3:A > \end{verbatim} Then we write: \bv (use 3) ; ok, ?_5 is proved. The active goal now is ; ?_6: B from ; 1:A -> B -> C ; 2:A -> B ; 3:A > \end{verbatim} And we conclude the proof by: %\newpage \bv (use 2) ; ok, ?_6 can be obtained from ; ?_7: A from ; 1:A -> B -> C ; 2:A -> B ; 3:A > (use 3) ; ok, ?_7 is proved. Proof finished. > \end{verbatim} To see a record of the complete proof, simply type \texttt{(display-proof)}. Other useful commands are \texttt{(display-pterm)} and \texttt{(display-proof-expr)}. See the manual for a description of the various \texttt{display} commands available in \mi. We observe that an alternative to using the \texttt{define} command at the beginning of the proof and then separately setting the goal would be to directly set the formula one wants to prove as the goal, that is, to write \texttt{(set-goal (pf "(A -> B -> C) -> (A -> B) -> A -> C"))}. Note also that the first three \texttt{assume} commands could be replaced by only one, i.e.: \texttt{(assume 1 2 3)}. As an alternative to the last two \texttt{use} commands, we could have given a single command: \texttt{(use-with 2 3)}, which amounts to applying a cut to the premises 2 and 3. A final remark: in the case of rather complex proofs, it is convenient to name specific hypotheses instead of using bare numbers. One can then simply use the \texttt{assume} command, followed by the name of the assumption in double quotes. Before starting to read the next section, it is advisable to consult the reference manual \cite{minlogman} for a compendium of the commands utilized in this example. It is worth noticing that in general these commands have a wider applicability than the usage presented so far. %%%%%%%%%%%%%%% %%%%%%%%%%%%%%% \subsection{A second example: classical logic} \mi\ implements minimal logic. If we want to prove a proposition which is true in classical logic but not in minimal logic, we explicitly need to state and use principles which are classical in nature. In the following example we shall use Stability, which is added to \mi\ as a global assumption. Roughly speaking, a global assumption is a proposition which can be recalled at any time, if needed, and whose proof does not concern us at the moment (hence it can also be an assumption with no proof). In order to check which global assumptions we have at our disposal we type: \texttt{(display-global-assumptions)}.
To check a particular global assumption whose name we already know, we write the above command followed by the name of the assumption we want to check, e.g.: \texttt{(display-global-assumptions "Stab-Log")}. Of course we can also introduce our own global assumptions and remove them at any time (see the reference manual for the specific commands). Stability is the logical law for which, for any proposition $A$, $\lnot \lnot A \ob A$ holds. In \mi, $\lnot A$ is defined to be $A \ob \bot$, i.e. $A$ implies falsum. Stability is therefore the following proposition: $((A \ob \bot) \ob \bot) \ob A$. Suppose we want to prove the tautology: $$ ((A \ob B) \ob A) \ob A, $$ which is known as {\em Peirce formula}. For this second example, we will assume that the reader has prepared her sketch of the proof, and we will only give an intuitive idea of the proof, preferring to rather concentrate on the \mi\ interaction, which will be given in its complete form. As in the previous example, we observe first of all that the goal is an implication, hence we will assume its antecedent, $(A \ob B) \ob A$, and try to prove its consequent, $A$. Now classical logic comes into play, because in order to prove $A$, we will assume that its negation holds and try to get a contradiction from it. This will be achieved by use of Stability. We further note that in order to make the argument work, we will need at some stage to resort to another global assumption, the principle of ``ex falsum quodlibet''. This principle allows one to conclude any formula from a proof of falsum, i.e. it is the principle: $\bot \ob A$, for arbitrary $A$. We start by setting the goal and assuming the antecedent of the implication: \begin{verbatim} (add-predconst-name "A" "B" (make-arity)) (define peirce-formula (pf "((A -> B) -> A) -> A")) (set-goal peirce-formula) (assume 1) \end{verbatim} We obtain: \bv ; ok, we now have the new goal ; ?_2: A from ; 1:(A -> B) -> A > \end{verbatim} We now apply Stability, which is stored in \mi\ with the name \texttt{Stab-Log}, so that the goal $A$ will be replaced by its double negation: $(A \ob \bot) \ob \bot$. Note that $\bot$ is called \texttt{bot} in \mi. \bv (use "Stab-Log") ; ok, ?_2 can be obtained from ; ?_3: (A -> bot) -> bot from ; 1:(A -> B) -> A > \end{verbatim} Since this is an implication, we let: \bv (assume 2) ; ok, we now have the new goal ; ?_4: bot from ; 1:(A -> B) -> A ; 2:A -> bot > \end{verbatim} We then use hypothesis 2 to replace the goal $\bot$ by $A$. \bv (use 2) ; ok, ?_4 can be obtained from ; ?_5: A from ; 1:(A -> B) -> A ; 2:A -> bot > \end{verbatim} Also $A$ can be replaced by $A \ob B$ by use of hypothesis 1. Subsequently, we can assume the antecedent of the new goal, $A$, and call it ``hypothesis 3'': \bv (use 1) ; ok, ?_5 can be obtained from ; ?_6: A -> B from ; 1:(A -> B) -> A ; 2:A -> bot > (assume 3) ; ok, we now have the new goal ; ?_7: B from ; 1:(A -> B) -> A ; 2:A -> bot ; 3:A > \end{verbatim} Now we can make use of the principle of ex falsum quodlibet: if we want to prove $B$, we can instead prove falsum, since from falsum anything follows, and in particular $B$. Therefore our goal can be updated to $\bot$ by the following instance of \texttt{use}: \bv (use "Efq-Log") ; ok, ?_7 can be obtained from: ; ?_8: bot from ; 1:(A -> B) -> A ; 2:A -> bot ; 3:A > \end{verbatim} The next two steps are obvious. \bv (use 2) ; ok, ?_8 can be obtained from ; ?_9: A from ; 1:(A -> B) -> A ; 2:A -> bot ; 3:A > (use 3) ; ok, ?_9 is proved. Proof finished. 
> \end{verbatim} \subsection{Conjunction} To conclude this section on propositional logic, we give a short example of a tautology which uses conjunction. We want to prove $$ A \land B \ob B \land A. $$ We shall simply record the code of our \mi\ proof, asking the reader to check \mi's reply at each step. A few comments will be added at the end. \begin{verbatim} (add-predconst-name "A" "B" (make-arity)) (set-goal (pf "(A & B) -> (B & A)")) (assume 1) (split) (use 1) (use 1) \end{verbatim} The command \texttt{(split)} operates on the goal if it is a conjunction and it has the effect of splitting it into its two components. The command \texttt{use} is employed to obtain the left (respectively the right) conjunct of the assumption and ``\texttt{use}'' it to derive the goal. The reader is encouraged to try and prove other examples of tautologies. %%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%% \section{Predicate logic} \subsection{A first example.} We now exemplify how to prove a statement in predicate logic. Suppose we want to prove that every total relation which is symmetric and transitive is reflexive. For simplicity we shall work with natural numbers. We hence want to prove the following statement: \begin{align*} & \forall n\, \forall m\, (R n m \ob R m n) \,\land\, \forall n\, \forall m\, \forall k\, (R n m \;\land\; R m k \ob R n k)\\ & \ob \forall n\, (\exists m\, R n m \ob R n n), \end{align*} \noindent where $n$, $m$, $k$ vary over the natural numbers, while $R$ is a binary predicate on natural numbers. Before starting to prove the claim, we observe that we can equivalently express it by another formula which is simpler to prove in \mi, e.g. by one in which the conjunctions have been replaced by appropriate implications. That is, we can instead prove the following formula: \begin{align*} & (\forall n,\, m. R n m \ob R m n) \ob (\forall n,\, m,\, k. R n m \ob R m k \ob R n k)\\ & \ob \forall n,\, m. R n m \ob R n n, \end{align*} where the ``.'' is used to indicate the scope of a quantifier, with the convention that it binds as far as possible. The strategy of first simplifying the goal may in some cases allow one to considerably reduce the amount of time needed to prove a statement. However, there might be cases in which one prefers to prove a more complex formula, for example when proving a lemma which is then used in the proof of a more intricate theorem. For completeness and for comparison, we shall also record a proof of the original goal at the end of this section. Since the predicate $R$ is required to vary over the natural numbers, we first of all load a file, already available with the distribution, which introduces the algebra of natural numbers and some operations on them, like for example addition. This is obtained by typing: \bv (libload "nat.scm") \end{verbatim} We can now introduce the constant $R$. We also want to facilitate our work a bit further and separately introduce the two assumptions $\forall n\, m. R n m \ob R m n$ and $\forall n\, m\, k. R n m \ob R m k \ob R n k$. We do this by means of a \texttt{define} command, which enables us to give a name to each assumption. We then use these names to make a formula in implication form. In the following, \texttt{py} is the analogue for types of the function parse formula. Note also that the file \texttt{nat.scm} already introduces $m$, $n$ and $k$ as ``default'' variables on the natural numbers, hence we do not need to explicitly declare them here, too.
\bv (add-predconst-name "R" (make-arity (py "nat") (py "nat"))) (define Symm (pf "all n,m.R n m -> R m n")) (define Tran (pf "all n,m,k.R n m -> R m k -> R n k")) \end{verbatim} We now state the goal: \bv (set-goal (mk-imp Symm Tran (pf "all n,m.R n m -> R n n"))) ; ?_1: (all n,m.R n m -> R m n) -> (all n,m,k.R n m -> R m k -> R n k) -> all n,m.R n m -> R n n > \end{verbatim} Note that in this specific case, we could have directly written the two formulas as antecedents of the implication, avoiding the detour through a \texttt{define} command. In the case of more complex formulas, however, or when we need to use the same formulas for various proofs throughout one session, this strategy can be quite useful. %\bv %(display-global-assumptions) %\end{verbatim} We now observe that the goal is an implication, so that the first step would be to write \texttt{(assume "Symm" "Tran")}. By this command we would obtain a universally quantified formula and we would then need to proceed to eliminate the quantifiers. This can be accomplished by another \texttt{assume} command in which we specify two natural numbers. For simplicity we here fix the natural numbers $n$ and $m$. So we would write \texttt{(assume "n" "m")}. This would produce an implication which would also need to be eliminated by another \texttt{assume} command, say \texttt{(assume 1)}. We can put all these commands together by writing: \bv (assume "Symm" "Tran" "n" "m" 1) ; ok, we now have the new goal ; ?_2: R n n from ; Symm:all n,m.R n m -> R m n ; Tran:all n,m,k.R n m -> R m k -> R n k ; n m 3:R n m > \end{verbatim} The next move is to make use of our assumptions. It is clear that if we take $k$ to be $n$ in \texttt{Tran}, then the goal can be obtained by an instance of \texttt{Symm}, and the proof is easily completed. We here utilize \texttt{use} by additionally providing a term, \texttt{"m"}, which will instantiate the only variable which cannot be automatically inferred by unification. In the following \texttt{pt} stands for parse term. \bv (use "Tran" (pt "m")) ; ?_4: R m n from ; Symm:all n,m.R n m -> R m n ; Tran:all n,m,k.R n m -> R m k -> R n k ; n m 3:R n m ; ?_3: R n m from ; Symm:all n,m.R n m -> R m n ; Tran:all n,m,k.R n m -> R m k -> R n k ; n m 3:R n m > \end{verbatim} The \texttt{use} command has the effect of replacing the current goal with two new goals. These are obtained from \texttt{Tran} by instantiating the quantifiers with \texttt{n}, \texttt{m} and \texttt{n} (the two \texttt{n} being inferred by unification) and then by replacing the goal with the antecedents of the resulting instance of \texttt{Tran}. We can now write: \bv (use 3) > ; ok, ?_3 is proved. The active goal now is ; ?_4: R m n from ; Symm:all n,m.R n m -> R m n ; Tran:all n,m,k.R n m -> R m k -> R n k ; n m 3:R n m > \end{verbatim} We finally employ \texttt{Symm} and another \texttt{use}: \bv (use "Symm") ; ok, ?_4 can be obtained from ; ?_5: R n m from ; Symm:all n,m.R n m -> R m n ; Tran:all n,m,k.R n m -> R m k -> R n k ; n m 3:R n m > (use 3) ; ok, ?_5 is proved. Proof finished. > \end{verbatim} %%%%%%%%%%%%%%%%%%%%% {\bf The same example.} We here present a \mi\ proof of the original goal of the previous example, as it allows us to exemplify the use of some new commands. We shall leave the proof uncommented and make a few remarks at the end. The reader will have to examine the proof and check \mi's interaction. \bv (libload "nat.scm") (add-predconst-name "R" (make-arity (py "nat") (py "nat"))) (set-goal (pf "(all n,m. R n m -> R m n) & (all n,m,k.
R n m & R m k -> R n k) -> all n. ex m R n m -> R n n")) (assume 1) (inst-with 1 'left) (inst-with 1 'right) (drop 1) (name-hyp 2 "Symm") (name-hyp 3 "Tran") (assume "n" 4) (ex-elim 4) (assume "m" 5) (cut (pf "R m n")) (assume 6) (use-with 3 (pt "n") (pt "m") (pt "n") "?") (drop "Symm" "Tran" 4) (split) (use 5) (use 6) (use-with 2 (pt "n") (pt "m") 5) \end{verbatim} The \texttt{use-with} command is similar to the \texttt{use} command, but when applied to a universal quantifier it requires one to specify explicitly the instantiating terms. In its first occurrence in the proof above we write \texttt{"?"} to indicate that \mi\ will have to replace the current goal with a new goal. In the second occurrence of \texttt{use-with}, \mi\ will instantiate as specified the universal quantifiers in premise 2 and then use hypothesis 5 to prove the goal. The command \texttt{inst-with} is analogous to \texttt{use-with}, but operates by forward reasoning; hence it allows one to simplify a hypothesis instead of the conclusion. In this case, \texttt{(inst-with 1 'left)} has the effect of producing the left component of the conjunction which constitutes the first hypothesis. Similarly for the right component. \texttt{ex-elim} eliminates an existential quantifier and produces a new universally quantified goal. As to \texttt{cut}, this command enables one to introduce new goals. \texttt{(cut A)} has the effect of replacing goal $B$ by two new goals, $A \ob B$ and $A$. In the proof above we have also made use of the commands \texttt{drop} and \texttt{name-hyp}. The first allows one to remove one or more hypotheses from the present context, to make the proof more readable. In fact, it simply replaces the current goal with another goal in which the `dropped' hypotheses are not displayed anymore (but they are not removed in general, as should be clear from the example above). The second command has similar `cosmetic' purposes, and allows one to rename a specific hypothesis and hence to work with names given by the user instead of numbers produced by default. Both these commands are especially useful in the case of long and intricate proofs. %%%%%%%%%%%%%%%%%%%%% \subsection{Another example with classical logic} We conclude this section on predicate logic with a final example of a formula which requires classical logic. We want to prove the following: $$ \exists^c x\,. Q x \ob \forall y\, Q y, $$ where $Q$ is now a unary predicate, and we do not require $Q$ to range over the natural numbers. In addition, the existential quantifier, $\exists^c$, is here a classical existential quantifier, to be distinguished from the existential quantifier we encountered in the previous example. A classical quantifier $\exists^c x$ is nothing more than an abbreviation for $\lnot\, \forall x\, \lnot$. The formula we want to prove is known as the ``drinker'' formula, as it says something like: ``in a bar, there is a person such that if she drinks then everybody drinks''. One could state the goal either by direct use of the classical existential quantifier (called \texttt{excl} in \mi) or by replacing it with its meaning by use of the universal quantifier. In the first case one would write: \bv (set-goal (pf "excl x. Q x -> all y Q y")) \end{verbatim} \mi\ would then automatically replace the \texttt{excl} quantifier by its meaning when performing the first command.
Otherwise we can state the formula as follows: \bv (set-goal (pf "(all x.(Q x -> all y Q y) -> bot) -> bot")) \end{verbatim} In our implementation we shall introduce $Q$ as a predicate on an arbitrary type, say $\alpha$. \mi\ already has a type variable \texttt{alpha} as default, and we shall use it in the following (hence no declaration of the type \texttt{alpha} is needed). As the proof is quite simple and does not introduce any new notion, we only write the code, letting the reader verify it in \mi. \bv (add-predconst-name "Q" (make-arity (py "alpha"))) (add-var-name "x" "y" (py "alpha")) (set-goal (pf "(all x.(Q x -> all y Q y) -> bot) -> bot")) (assume 1) (use 1 (pt "x")) (assume 2 "y") (use "Stab-Log") (assume 3) (use 1 (pt "y")) (assume 4) (use "Efq-Log") (use-with 3 4) \end{verbatim} We can store the proof as a theorem to be used later on with the command: \bv (save "Drinker") \end{verbatim} %%%%%%%%%%%%%%%%% \section{Induction} {\bf Induction on the natural numbers.} We now present a simple proof which exemplifies the use of induction on the natural numbers. The formula to prove is the following: $$ \forall n,\, m.\, n + m = m + n. $$ This can be proved by induction on the natural numbers as follows: we fix an $n$ and show that $n + 0 = 0 + n$ and also that if $n + m = m + n$ then $n + Succ(m) = Succ(m) + n$. As before we will make use of a file already available in the distribution which contains the definitions of the algebra of the natural numbers and also of the operations of addition and multiplication on the natural numbers. These are defined by means of computation and rewrite rules. If the file is not already loaded\footnote{It is a good practice to run a new \mi\ session when loading new files which could turn out to be incompatible with previously loaded files or previous definitions.}, we type: \bv (libload "nat.scm") \end{verbatim} We set the goal by letting: \bv (set-goal (pf "all n,m.n + m = m + n")) \end{verbatim} We now instantiate the first quantifier with \texttt{n}: \bv (assume "n") ; ok, we now have the new goal ; ?_2: all m n+m=m+n from ; n > \end{verbatim} We apply induction by simply typing: \bv (ind) \end{verbatim} The command \texttt{ind} requires a universally quantified goal and applies induction to it in accord to the definition of the specific algebra type (in this case $nat$). \mi's reply will be something like this: \bv ; ok, ?_2 can be obtained from ; ?_4: all n15.n+n15=n15+n -> n+Succ n15=Succ n15+n from ; n ; ?_3: n+0=0+n from ; n > \end{verbatim} We then replace the goal with its normal form by letting: \bv (normalize-goal) ; ok, the normalized goal is ; ?_5: T from ; n > \end{verbatim} The latter command can be abbreviated with \texttt{ng} and it will normalize the goal by using the computation rules for \texttt{+} introduced in the file \texttt{nat.scm}. The goal is now proved by simply appealing to the axiom \texttt{Truth-Axiom} (which can be used as an argument to the \texttt{use} command). \bv (use "Truth-Axiom") ; ok, ?_5 is proved. The active goal now is ; ?_4: all n15.n+n15=n15+n -> n+Succ n15=Succ n15+n from ; n > \end{verbatim} We can now instantiate the quantified variable \texttt{n15} by \texttt{m}: \bv (assume "m" 1) ; ok, we now have the new goal ; ?_6: n+Succ m=Succ m+n from ; n m 1:n+m=m+n > \end{verbatim} Finally we normalize the goal and use the assumption: \bv (ng) ; ok, the normalized goal is ; ?_7: n+m=m+n from ; n m 1:n+m=m+n > \end{verbatim} \bv (use 1) ; ok, ?_7 is proved. Proof finished. 
> \end{verbatim} To see the proof: \bv (display-proof) \end{verbatim} {\bf Another example.} We now present another example of induction on the natural numbers, which introduces some additional features of \mi. Suppose we want to prove that for all natural numbers $n$, $2 \cdot n$ is even. We define two new {\em program constants} (see \cite{minlogman}) \texttt{Odd} and \texttt{Even} which take a natural number as argument and give a boolean (true or false) as output. The behaviour of these program constants can be specified by means of {\em computation rules}. In this case the computation rules will simultaneously characterize \texttt{Odd} and \texttt{Even}. The command used to introduce a new program constant is \texttt{add-program-constant}. It will require the name of the constant and its type; further arguments may be the degree of totality, the token type (e.g.\ \texttt{const}) and the arity (see \cite{minlogman}). In the following, the type of the new constants \texttt{Odd} and \texttt{Even} will be introduced by means of the command \texttt{mk-arrow}, which produces an arrow type. The behaviour of a new program constant can be specified by introducing one or more computation rules for it. This is accomplished by use of the command \texttt{add-computation-rule}, having two arguments: a left-hand side and a right-hand side. The right-hand side specifies the result of the computation rule for the argument indicated in the left-hand side. The following implementation of the example should clarify how to use these commands. \bv (libload "nat.scm") (add-program-constant "Odd" (mk-arrow (py "nat") (py "boole")) 1) (add-program-constant "Even" (mk-arrow (py "nat") (py "boole")) 1) (add-computation-rule (pt "Odd 0") (pt "F")) (add-computation-rule (pt "Even 0") (pt "T")) (add-computation-rule (pt "Odd (Succ n)") (pt "Even n")) (add-computation-rule (pt "Even (Succ n)") (pt "Odd n")) (set-goal (pf "all n.Even (2*n)")) (ind) (ng) (use "Truth-Axiom") (ng) (assume "n" 1) (use 1) \end{verbatim} %%%%%%%%%%%%%%%%%%%%%%%% {\bf Induction on Lists.} The following example is an exercise on lists over an arbitrary type $\alpha$. This example also illustrates the use of induction, but since we now deal with {\em infinitary algebras} (see \cite{minlogman}) the task will be a bit harder than when working with the natural numbers. Together with the file \texttt{nat.scm}, we now also need to load the file \texttt{list.scm}, which contains basic definitions and operations on lists over an arbitrary type $\alpha$. We recommend going through the file before starting to work on this example. We shall introduce a function, $\listrev$, on lists which has the effect of reversing a list, and then prove the following: $$ \forall y,z. Equal (\listrev\, (y \listappend z)) ((\listrev\, z) \listappend (\listrev\, y)), $$ where $y$ and $z$ are lists over an arbitrary type $\alpha$ and $\listappend$ denotes the append function on lists as defined in \texttt{list.scm}. Note that we here need to use the predicate $Equal$ instead of $=$ because the algebra of lists over $\alpha$ is an infinitary algebra and hence equality for it has to be treated as a predicate constant with appropriate axioms (see \cite{minlogman}). Before stating the goal we need to define $\listrev$. This has to be defined by induction, by first giving its value for the empty list and then saying how it applies to a non-empty list.
The two defining conditions for $\listrev$ will be the following: \begin{align*} % \listrev\,(\nil\, \alpha) &= (\nil\, \alpha), \\ \listrev\,(a::y) &= (\listrev\, y) \listappend (a{:}) % \end{align*} where, according to the notation in \texttt{list.scm}, $\nil\, \alpha$ denotes the empty list over the type $\alpha$, $a::y$ denotes the list obtained by adding the object $a$ of type $\alpha$ to the list $y$ (over $\alpha$), while $a{:}$ is the one element list obtained from $a$. First of all we load the files \texttt{nat.scm} and \texttt{list.scm}. \bv (libload "nat.scm") (libload "list.scm") \end{verbatim} To simplify our work, we declare some variables of the appropriate types: \bv (add-var-name "a" "b" (py "alpha")) (add-var-name "u" "v" "w" "s" (py "list alpha")) \end{verbatim} We then declare a new program constant, \texttt{Rev}, which takes a list over \texttt{alpha} as argument and gives another list over \texttt{alpha} as output. \bv (add-program-constant "Rev" (py "list alpha => list alpha") 1 'const 1) \end{verbatim} We can now introduce two computation rules which correspond to the two conditions for $\listrev$ presented above. \bv (add-computation-rule (pt "(Rev alpha) (Nil alpha)") (pt "(Nil alpha)")) (add-computation-rule (pt "(Rev alpha) (a::w)") (pt "((Rev alpha) w) :+: (a:)")) \end{verbatim} Now we could start proving the goal: \bv all w,s. Equal ((Rev alpha)(w :+: s)) (((Rev alpha) s) :+: ((Rev alpha) w))) \end{verbatim} With the purpose of simplifying our proof, we now deliberately introduce three ``ad hoc'' global assumptions which will be used as lemmata in the proof of the main goal. We prove one of these global assumptions at the end of the main proof, and leave the others as an exercise for the reader. We can use \texttt{aga} as an abbreviation for \texttt{add-global-assumption} \bv (aga "Reff" (pf "all a,w,s. (Equal w s) -> Equal (a::w) (a::s)")) (aga "Eqrev" (pf "all w,s,u. Equal w s -> Equal (w :+: u) (s :+: u)")) (aga "Asrev" (pf "all v,s,w,u. Equal v ((s :+: w) :+: u) -> Equal v (s :+: (w :+: u))")) \end{verbatim} We now prove a short lemma which says that any list, $z$, is equal to the list obtained by appending the empty list to $z$. \bv ; Lemma (set-goal (pf "all s. Equal s (s :+:(Nil alpha))")) (ind) (ng) (use-with "Eq-Refl" (py "list alpha") (pt "(Nil alpha)")) (assume "a" "w" 1) (use "Reff") (use 1) (save "AppendEmpty") \end{verbatim} We note that by writing \texttt{(use-with "Eq-Refl" (py "list alpha") (pt "(Nil alpha)"))}, we make use of the axiom of reflexivity for \texttt{Equal}, which is called \texttt{Eq-Refl}, and apply it to the term \texttt{Nil alpha} of type \texttt{list alpha}. The lemma and the global assumptions stated above can now be used to prove the goal. \bv (set-goal (pf "all w,s. (Equal ((Rev alpha)(w :+: s)) (((Rev alpha) s) :+: ((Rev alpha) w)))")) (ind) (ng) (assume "s") (use "AppendEmpty") (assume "a" "w") (ng) (assume 1 "s") (use-with "Asrev" (pt "((Rev alpha)(w :+: s) :+: a:)") (pt "((Rev alpha) s)") (pt "((Rev alpha) w)") (pt "a:") "?") (use-with "Eqrev" (pt "((Rev alpha)(w :+: s))") (pt "((Rev alpha) s :+: (Rev alpha) w)") (pt "a:") "?") (use 1) \end{verbatim} Finally, we show how to prove the global assumption \texttt{Eqrev}, as it is a typical application of equality reasoning. \bv (set-goal (pf "all w,s,u. 
Equal w s -> Equal (w :+: u) (s :+: u)")) (strip) (simp 1) (use-with "Eq-Refl" (py "list alpha") (pt "s:+:u")) \end{verbatim} Note the command \texttt{strip} which moves all universally quantified variables and hypotheses of the current goal into the context at once. \texttt{simp} simplifies a proof which involves the predicate \texttt{Equal}, by substituting ``Equal'' terms in the goal. %%%%%%%%%% \section{Search} \mi\ allows for automatic proof search. There are two distinct facilities for performing an automatic search in \mi. The first is given by the command \texttt{(prop)} and implements Hudelmaier-Dyckhoff's search procedure for the case of minimal propositional logic (see e.g.\ \cite{Hudelmaier89}, \cite{Dyckhoff92}). The second is given by the command \texttt{(search)} and embodies a search algorithm based on Miller's \cite{Miller91b} and on ideas of Berger (see \cite{minlogman} for details on the algorithm and for some differences with Miller's algorithm). The command \texttt{search} enables us to automatically find a proof for a wider class of formulas compared with \texttt{prop}, since it also works for formulas with quantifiers (see the reference manual for a detailed description of the class of formulas dealt with by \texttt{search}). {\bf Prop.} One is advised to use \texttt{prop} for propositional (minimal) logic. \texttt{prop} will first look for a proof in propositional minimal logic. If it fails to find a proof for the given proposition, it will try with intuitionistic logic, by adding appropriate instances of ``Ex falsum quodlibet''. If this search also gives no positive answer, it will try to find a proof in classical logic, by adding appropriate instances of Stability. To apply this search algorithm, one simply needs to type \texttt{(prop)} after stating the goal or at any point of a proof from which one guesses that (minimal) propositional logic would suffice. If \mi\ finds a proof, one can then display it by means of any of the display commands available for proofs; for example by writing \texttt{dnp} (which is a shortcut for \texttt{display-normalized-proof}). The reader is encouraged to apply the command to the first examples of propositional logic of this tutorial, including ``Peirce's law''. {\bf Search.} The \texttt{search} command is more powerful than \texttt{prop} since it allows us to automatically find proofs also for some quantified formulas, but it only performs a search in minimal logic. If one wants to apply this command to a classical formula like ``Peirce's law'', one could for example add the appropriate instance of ``Ex falsum quodlibet'' and of Stability as antecedents of the goal. In the case of more complex proofs, in which one cannot easily modify the actual goal, an alternative would be to avail oneself of a more complete use of the \texttt{search} command, which allows us to specify some global assumptions or theorems or even hypotheses from the given context which one would like to be used in the resulting proof.\footnote{It is important to notice that in \mi\ we do not quantify over propositions, hence one needs to exercise some care in choosing the global assumptions to be used in \texttt{search}.
In the case of ``Peirce's law'', one would need to pass e.g.\ from the global assumption \texttt{Efq-Log} to a theorem which is a specific instance of it, and make use of the latter in \texttt{search}.} Since the search space in the case of quantified formulas can become very large, this possibility of declaring specific assumptions to be used in the proof can be very useful, especially if we also state the maximum multiplicity we allow for each assumption (i.e.\ the maximum number of times each assumption may be used in the proof). One can also use this same device to exclude the use of a specific assumption in the proof, simply by setting its multiplicity to 0. To use the plain version of \texttt{search}, one simply writes \texttt{(search)}. See the reference manual for the precise syntax of the command \texttt{search} when other assumptions are invoked with the respective multiplicities. {\bf An example with \texttt{search}.} As an example of the use of \texttt{search}, we apply the algorithm to the following problem: if $f$ is a continuous function then $f$ composed with itself is also a continuous function. We suggest solving the problem as follows. \bv (add-var-name "x" "y" (py "alpha")) (add-tvar-name "beta") (add-var-name "U" "V" "W" (py "beta")) (add-predconst-name "in" (make-arity (py "alpha") (py "beta"))) (add-var-name "f" (py "alpha=>alpha")) (set-goal (pf "all f.(all x,V. in (f x) V -> excl U. in x U & all y. in y U -> in (f y) V) -> all x,W. in (f(f x)) W -> excl U. in x U & all y.in y U -> in (f(f y)) W")) (search) (dnp) \end{verbatim} Note that one can switch on verbose search by evaluating \texttt{(set! VERBOSE-SEARCH \#t)} before calling \texttt{search}. In this way one can see the individual steps performed by the search algorithm and detect possible difficulties in finding a proof. %%%%%%%%%% \section{Conclusion} We finally note that one of the main motivations for developing \mi\ and one of its most important features is program extraction, that is, the possibility of using \mi\ to extract functional programs from proof terms. However, the treatment of program extraction would require a considerable extension of this tutorial and it is hence omitted for the time being. The reader who wishes to know more about this topic is encouraged to consult the reference manual and the relevant papers listed on the \mi\ web page. To conclude, we recall that all the examples of this tutorial can be found in the file \texttt{tutorial.scm} located in the examples directory of the \mi\ distribution.
\newpage \section{Useful Commands for \textsc{Emacs} and Petite \textsc{Scheme}} \begin{center} \begin{large} \textsc{Emacs}\\ \end{large} \end{center} \vspace{3 mm} \begin{itemize} \item Start \textsc{Emacs}: \texttt{Emacs \&}\\ \item Leave \textsc{Emacs}: C-x C-c\\ \item Split a window in two: \texttt{C-x 2}\\ \item Move to another Buffer: \texttt{C-x b} (then specify the Buffer's name)\\ \item Move to another window: \texttt{C-x o}\\ \item Load a file: \texttt{C-x C-f} (then give a name of a file with extension \texttt{.scm})\\ \item Save a file: \texttt{C-x C-s}\\ \item Exit from the Minibuffer: \texttt{C-g}\\ \end{itemize} \begin{center} \begin{large} \textsc{Scheme}\\ \end{large} \end{center} \vspace{3 mm} \begin{itemize} \item Load \textsc{(Petite) Scheme}: \texttt{M-x run-petite}\\ \item Evaluate a \textsc{Scheme} expression: \texttt{C-x C-e}\\ \item Evaluate a region: mark the region and then \texttt{C-c C-r}\\ \item Kill a process: \texttt{C-c C-c} \\ \item Leave the Debug: \texttt{r}\\ \item End a \textsc{Scheme} session: \texttt{(exit)}\\ \item Comment: \texttt{;}\\ \end{itemize} \begin{enumerate}[] \item \texttt{C = Control} or \texttt{Strg} \item \texttt{M = Meta} or \texttt{Esc} or \texttt{Alt}. \end{enumerate} \newpage \section{Useful Commands: \mi} The following is a list of commands which could be used in a `standard' interactive proof with \mi. Rather than explaining the commands in detail, we shall write them down, often with a short description of their use gathered from the reference manual. The reader is anyhow advised to check the full details in the reference manual. \section{Some declarations needed to start a proof} \begin{enumerate}[] \item \texttt{(add-tvar-name \textsl{name1} \dots)} \index{add-tvar-name@\texttt{add-tvar-name}}\\ \item \texttt{(add-alg \dots)}, and also \\ \texttt{(add-algs \dots)} \texttt{(add-param-alg \dots)} \texttt{(add-param-algs \dots)} \index{add-param-algs@\texttt{add-param-algs}}\\ \item \texttt{(add-var-name \textsl{name1} \dots\ \textsl{type})} \index{add-var-name@\texttt{add-var-name}}\\ \item \texttt{(add-predconst-name \textsl{name1} \dots\ \textsl{arity})} \index{add-predconst-name@\texttt{add-predconst-name}}\\ \item \texttt{(add-pvar-name \textsl{name1} \dots\ \textsl{type})} \index{add-var-name@\texttt{add-pvar-name}}\\ \item \texttt{(add-program-constant \textsl{name} \textsl{type} <\textsl{rest}>)} \index{add-program-constant@\texttt{add-program-constant}}\\ \item \texttt{(add-computation-rule \textsl{lhs} \textsl{rhs})} \index{add-computation-rule@\texttt{add-computation-rule}}\\ \item \texttt{(add-rewrite-rule \textsl{lhs} \textsl{rhs})} \index{add-rewrite-rule@\texttt{add-rewrite-rule}}\\ \item \texttt{(add-global-assumption \textsl{name} \textsl{formula})} \index{add-global-assumption@\texttt{add-global-assumption}} \quad \hbox{(abbr. \texttt{aga}\index{aga@\texttt{aga}})}\\ \end{enumerate} To each command above there corresponds one to remove constants, variables etc already introduced. 
For example:\\ \texttt{(remove-predconst-name \textsl{name1} \dots)} \index{remove-predconst-name@\texttt{remove-predconst-name}}\\ There are also numerous display commands, in particular the following:\\ \texttt{(display-program-constants \textsl{name1} \dots)} \index{display-program-constants@\texttt{display-program-constants}}.\\ \texttt{(display-global-assumptions \textsl{string1} \dots)} \index{display-global-assumptions@\texttt{display-global-assumptions}}\\ \texttt{(display-constructors \textsl{alg-name1} \dots)}% \index{display-constructors@\texttt{display-constructors}}\\ \texttt{(display-theorems \textsl{string1} \dots)} \index{display-theorems@\texttt{display-theorems}}\\ \section{Goals} \texttt{(set-goal \textsl{formula})}\index{set-goal@\texttt{set-goal}} where \textsl{formula} needs to be closed (if not universal quantifiers will be inserted automatically).\\ \texttt{(normalize-goal \textsl{goal})} (abbr. \texttt{ng}) \index{normalize-goal@\texttt{normalize-goal}} replaces the goal by its normal form.\\ \texttt{(display-current-goal)}% \index{display-current-goal@\texttt{display-current-goal}} (abbr. \texttt{dcg}) \section{Generating interactive Proofs} %{\bf Backward Reasoning}\\ %{\bf Implication}\\ \underline{Implication}\\ \texttt{(assume \textsl{x1}\dots)}\index{assume@\texttt{assume}} moves the antecedent of a goal in implication form to the hypotheses. The hypotheses, $\textsl{x1}\dots$, should be identified by numbers or strings.\\ \texttt{(use \textsl{x})\index{use@\texttt{use}}} where \textsl{x} is \begin{itemize} \item a number or string identifying a hypothesis from the context, \item the string \inquotes{Truth}, \item the name of a theorem or global assumption. \item a closed proof, \item a formula with free variables from the context, generating a new goal.\\ \end{itemize} %{\bf Conjunction}\\ \underline{Conjunction}\\ \texttt{(split)}\index{split@\texttt{split}} expects a conjunction $A \land B$ as goal and splits it into two new goals, $A$ and $B$.\\ \texttt{(use \textsl{x} . \textsl{elab-path})}\index{use@\texttt{use}} where \textsl{x} is as in the description of the \texttt{use} command for implication and \textsl{elab-path} consists of \texttt{'left} or \texttt{'right}.\\ %{\bf Universal Quantifier}\\ \underline{Universal Quantifier}\\ \texttt{(assume \textsl{x1}\dots)}\index{assume@\texttt{assume}} moves universally quantified variables into the context. The variables need to be named (by using previously declared names of the appropriate types).\\ \texttt{(use \textsl{x} . \textsl{terms})} \index{use@\texttt{use}} where \textsl{x} is as in the case of implication and the optional \textsl{terms} is here a list of terms. One needs to explicitly provide terms for those variables that cannot be automatically instantiated by pattern unification. 
On the contrary, when pattern unification succeeds in finding appropriate instances for the quantifiers in the goal, then these instances will be automatically inserted.\\ %{\bf Existential Quantifier}\\ \underline{Existential Quantifier}\\ \texttt{(ex-intro \textsl{term})}\index{ex-intro@\texttt{ex-intro}} by this command the user provides a term to be used for the present (existential) goal.\\ \texttt{(ex-elim \textsl{x})}\index{ex-elim@\texttt{ex-elim}}, where \textsl{x} is \begin{itemize} % \item a number or string identifying an existential hypothesis from the context, % \item the name of an existential global assumption or theorem, % \item a closed proof on an existential formula, % \item an existential formula with free variables from the context, genera\-ting a new goal.\\ % \end{itemize} %Let $\ex y A$ be the existential formula identified by \textsl{x}. %The user is then asked to provide a proof for the present goal, %assuming that a $y$ satisfying $A$ is available.\\ %{\bf Classical Existential Quantifier}\\ \underline{Classical Existential Quantifier}\\ \texttt{(exc-intro \textsl{terms})}\index{exc-intro@\texttt{exc-intro}} this command is analogous to \texttt{(ex-intro)}, but it is used in the case of a classical existential goal.\\ \texttt{(exc-elim \textsl{x})}\index{exc-elim@\texttt{exc-elim}} this corresponds to \texttt{(ex-elim)} and applies to a classical existential quantifier.\\ \section{Other general commands} \texttt{(use-with \textsl{x} . \textsl{x-list})} \index{use-with@\texttt{use-with}} is a more verbose form of \texttt{use}, where the terms are not inferred via unification, but have to be given explicitly. Here \textsl{x} is as in \texttt{use}, and \textsl{x-list} is a list consisting of \begin{itemize} % \item a number or string identifying a hypothesis form the context, % \item the name of a theorem or global assumption, % \item a closed proof, % \item the string \inquotes{?} %(value of \texttt{DEFAULT-GOAL-NAME}), generating a new goal, % \item \texttt{'left} or \texttt{'right}, % \item a term, whose free variables are added to the context.\\ % \end{itemize} %Notice that \texttt{use-with} allows us to introduce %new free variables in the context. %This is to allow for proofs with free variables.\\ \texttt{(inst-with \textsl{x} . \textsl{x-list})}\index{inst-with@\texttt{inst-with}} does for forward chaining the same as \texttt{use-with} for backward chaining. It adds a new hypothesis which is an instance of a selected hypothesis or of a theorem. %Notice that this effect could also be obtained by cut. Here \textsl{x} and \textsl{x-list} are as in \texttt{use-with}. %\begin{itemize} %% %\item a number or string identifying a hypothesis form the context, %% %\item the name of a theorem or global assumption, %% %\item a closed proof, %% %\item a formula with free variables from the context, generating a new %goal. %% %\end{itemize} % is a list consisting of %\begin{itemize} %% %\item a number or string identifying a hypothesis form the context, %% %\item the name of a theorem or global assumption, %% %\item a closed proof, %% %\item the string \inquotes{?} %%(value of \texttt{DEFAULT-GOAL-NAME}), %generating a new goal, %% %\item \texttt{'left} or \texttt{'right}, %% %\item a type, %% %\item a term, whose free variables are added to the context.\\ %% %\end{itemize} \newpage \texttt{(inst-with-to \textsl{x} . 
\textsl{x-list} \texttt{name-hyp})} \index{inst-with-to@\texttt{inst-with-to}} expects a string as its last argument, to name the newly introduced instantiated hypothesis.\\ \texttt{(cut \textsl{A})}\index{cut@\texttt{cut}} replaces the goal $B$ by the two new goals $A$ and $A \to B$. Note that the same effect can also be produced by means of the \texttt{use} command.\\ \texttt{(ind)}\index{ind@\texttt{ind}} expects a goal $\forall x^\rho A$ with $\rho$ an algebra. If $c_1, \dots, c_n$ are the constructors of the algebra $\rho$, then \texttt{(ind)} will generate $n$ new goals: \sloppy $\forall \vec{x}_i. \subst{A}{x}{x_{1i}} \to \dots \to \subst{A}{x}{x_{ki}} \to \subst{A}{x}{c_i \vec{x}_i}$.\\ %\texttt{(ind \textsl{t})} %expects a goal $\subst{A}{x}{t}$. It %computes the algebra $\rho$ as type of the term $t$. Then again %it produces $n$ new goals: %$\forall \vec{x}_i. \subst{A}{x}{x_{1i}} \to \dots \to %\subst{A}{x}{x_{ki}} \to \subst{A}{x}{c_i \vec{x}_i}$.\\ \texttt{(simind \textsl{all-formula1} \dots)}\index{simind@\texttt{simind}} expects a goal $\forall x^\rho A$ with $\rho$ an algebra. The user provides other formulas to be proved simultaneously with the given one.\\ \texttt{(cases)}\index{cases@\texttt{cases}} expects a goal $\forall x^\rho A$ with $\rho$ an algebra. Assume that $c_1,\dots, c_n$ are the constructors of the algebra $\rho$. Then $n$ new (simplified) goals $\forall \vec{x}_i \subst{A}{x}{c_i \vec{x}_i}$ are generated.\\ %\texttt{(cases \textsl{t})} %expects a goal $\subst{A}{x}{t}$. It %computes the algebra $\rho$ as type of the term $t$. Then again $n$ %new goals $\forall \vec{x}_i \subst{A}{x}{c_i \vec{x}_i}$ are %generated.\\ %\texttt{(cases \textsl{'auto})} (or %\texttt{(casedist)}\index{casedist@\texttt{casedist}}) %expects an %atomic goal and checks whe\-ther its boolean kernel contains an if-term %whose test is neither an if-term nor contains bound variables. With %the first such test \texttt{(cases \textsl{test})} is called.\\ %{\bf General commands}\\ \texttt{(simp x)}\index{simp@\texttt{simp}} expects a known fact of the form $r^{\boole}$, $\lnot r^{\boole}$, $t=s$ or $t \approx s$. In case $r^{\boole}$, the boolean term $r$ in the goal is replaced by $T$, and in case $\lnot r^{\boole}$ it is replaced by $F$. If $t=s$ (resp. $t \approx s$), the goal is written in the form $\subst{A}{x}{t}$. Using Compat-Rev (i.e. $\forall x,y.x = y \to P y \to P x$) (resp. Eq-Compat-Rev (i.e. $\forall x,y.x \approx y \to P y \to P x$)) the goal $\subst{A}{x}{t}$ is replaced by $\subst{A}{x}{s}$, where $P$ is $\set{x}{A}$, $x$ is $t$ and $y$ is $s$. Here \textsl{x} is \begin{itemize} % \item a number or string identifying a hypothesis form the context, % \item the name of a theorem or global assumption, or % \item a closed proof.\\ % \item a formula with free variables from the context, generating a new goal. \end{itemize} % \texttt{(simpeq x)}\index{simpeq@\texttt{simpeq}} % needs to know $t \approx s$. The goal is written in the form % $\subst{A}{x}{t}$. Using Compat-Rev % (i.e. $\forall x,y. x \approx y \to P y \to P x$), the goal % $\subst{A}{x}{t}$ is replaced by $\subst{A}{x}{s}$, % with $\set{x}{A}$ for $P$, $t$ for $x$ and $s$ for $y$.\\ %\newpage \texttt{(name-hyp \textsl{i x1}})\index{name-hyp@\texttt{name-hyp}} expects an index $i$ and a string. Then a new goal is created, which differs from the previous one only in display aspects: the string names the $i$th hypothesis.\\ \texttt{(drop . 
x-list)}\index{drop@\texttt{drop}}, hides (but does not erase) the hypothesis listed in \texttt{x-list}. If \texttt{x-list} is empty, all hypotheses are hidden.\\ \newpage \texttt{(by-assume-with \textsl{x} \textsl{y} \textsl{u})}\index{by-assume-with@\texttt{by-assume-with}} is used when proving a goal $G$ from an existential hypothesis $ExHyp \colon \ex y A$. It corresponds to saying \inquotes{by $ExHyp$ assume we have a $y$ satisfying $A$}. Here \textsl{x} identifies an existential hypothesis, and we assume the variable $y$ and the kernel $A$ (with label $u$). This command corresponds to the sequence \texttt{(ex-elim \textsl{x})}, \texttt{(assume \textsl{y} \textsl{u})}, \texttt{(drop \textsl{x})}.\\ %\texttt{(min-pr \textsl{x} \textsl{measure})} %\index{min-pr@\texttt{min-pr}} %where \textsl{x} is %\begin{itemize} %% %\item a number or string identifying a classical existential hypothesis %from the context, %% %\item the name of a classical existential global assumption or theorem, %% %\item a closed proof on a classical existential formula, %% %\item a classical existential formula with free variables from the context, %generating a new goal. %% %\end{itemize} %The result is a new implicational goal, whose premise provides the %(classical) existence of instances with least measure. %{\bf Forward Reasoning}\\ %{\bf Implication, Conjunction, Universal Quantifier} %\underline{Implication, Conjunction, Universal Quantifier}\\ \texttt{(undo)}\index{undo@\texttt{undo}} or \texttt{(undo \textsl{n})}\index{undon@\texttt{undon}} has the effect of cancelling the last step in a proof, or the last \textsl{n} steps, respectively.\\ \section{Automation and Search} \texttt{(strip)}\index{strip@\texttt{strip}} moves all universally quantified variables and hypotheses of the current goal into the context.\\ \texttt{(strip n)} does the same as \texttt{(strip)} but only for $n$ variables or hypotheses.\\ \texttt{(proceed)}\index{proceed@\texttt{proceed}} automatically refines the goal as far as possible as long as there is a unique proof. When the proof is not unique, it prompts with the new refined goal, and allows to proceed in an interactive way.\\ \texttt{(prop)}\index{prop@\texttt{prop}} searches for a proof of the stated goal. It is devised for propositional logic only.\\ \texttt{(search \textsl{m} \textsl{(name1 m1)} \dots)}\index{search@\texttt{search}} expects for \textsl{m} a default value of multiplicity (i.e. a positive integer stating how often the assumptions are to be used). Here \textsl{name1} $\dots$ are \begin{itemize} % \item numbers or names of hypotheses from the present context or % \item names of theorems or global assumptions, % \end{itemize} and \textsl{m1} $\dots$ indicate the multiplicities of the specific \textsl{name1} $\dots$. To exclude a hypothesis one can list it with multiplicity $0$.\\ %\texttt{(searchex)}\index{searchex@\texttt{searchex}}\\ \section{Displaying proofs objects} \texttt{(display-proof\ . \textsl{opt-proof})} \index{display-proof@\texttt{display-proof}} (abbr. \texttt{dp})\\ \texttt{(display-proof-expr\ . \textsl{opt-proof})} \index{display-proof-expr@\texttt{display-proof-expr}} (abbr. \texttt{dpe})\\ \texttt{(display-eterm . \textsl{opt-proof})} (abbr. \texttt{det})\\ \texttt{(check-and-display-proof)} (abbr. \texttt{cdp})\\ \texttt{(display-normalized-proof\ . \textsl{opt-proof})} \index{display-normalized-proof@\texttt{display-normalized-proof}} (abbr. \texttt{dnp}) \\ \texttt{(display-normalized-proof-expr\ . 
\textsl{opt-proof})} \index{display-normalized-proof-expr@\texttt{display-normalized-proof-expr}} (abbr. \texttt{dnpe})\\ \texttt{(display-normalized-eterm . \textsl{opt-proof})} (abbr. \texttt{dnet}) \bibliography{minlog} \bibliographystyle{amsplain} \end{document} %%% Local Variables: %%% mode: latex-math %%% TeX-master: t %%% End: minlog-4.0.99.20080304/doc/infrule.sty0000644000175000017500000001606710070303142016243 0ustar barralbarral%infrule.sty %99-05-07 (from M. Hofmann) %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% %%% %%% BCP's latex tricks for typesetting inference rules %%% %%% %%% %%% Version 1.3 %%% %%% %%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% %%% This package supports two styles of rules: named and unnamed. %%% Unnamed rules are centered on the page. Named rules are set so %%% that a series of them will have the rules centered in a vertical %%% column taking most of the page and the labels right-justified. %%% When a label would overlap its rule, the label is moved down. %%% %%% The width of the column of labels can be varied using a command of the %%% form %%% %%% \typicallabel{T-Arrow-I} %%% %%% The default setting is: %%% %%% \typicallabel{} %%% %%% In other words, the column of rules takes up the whole width of %%% the page: rules are centered on the centerline of the text, and no %%% extra space is left for the labels. %%% %%% The minimum distance between a rule and its label can be altered by a %%% command of the form %%% %%% \setlength{\labelminsep}{0.5em} %%% %%% (This is the default value.) %%% %%% Examples: %%% %%% An axiom with a label in the right-hand column: %%% %%% \infax[The name]{x - x = 0} %%% %%% An inference rule with a name: %%% %%% \infrule[Another name] %%% {\mbox{false}} %%% {x - x = 1} %%% %%% A rule with multiple premises on the same line: %%% %%% \infrule[Wide premises] %%% {x > 0 \andalso y > 0 \andalso z > 0} %%% {x + y + z > 0} %%% %%% A rule with several lines of premises: %%% %%% \infrule[Long premises] %%% {x > 0 \\ y > 0 \\ z > 0} %%% {x + y + z > 0} %%% %%% A rule without a name, but centered on the same vertical line as rules %%% and axioms with names: %%% %%% \infrule[] %%% {x - y = 5} %%% {y - x = -5} %%% %%% A rule without a name, centered on the page: %%% %%% \infrule %%% {x = 5} %%% {x - 1 > 0} %%% %%% %%% Setting the flag \indexrulestrue causes an index entry to be %%% generated for each named rule. 
%%% %%% Setting the flag \suppressrulenamestrue causes the names of all rules %%% to be left blank %%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% The font for setting inference rule names \newcommand{\rn}[1]{% \ifmmode \mathchoice {\mbox{\sc #1}} {\mbox{\sc #1}} {\mbox{\small\sc #1}} {\mbox{\tiny\uppercase{#1}}}% \else {\mbox{\sc #1}}% \fi} \newif\ifsuppressrulenames \suppressrulenamesfalse \newif\ifbcprulessavespace \bcprulessavespacefalse %%% How to display a rule's name to the right of the rule \newcommand{\inflabel}[1]{% \ifsuppressrulenames\else \def\lab{#1}% \ifx\lab\empty \relax \else (\rn{\lab})% \fi\fi } %%% Amount of extra space to add before and after a rule \newlength{\afterruleskip} \setlength{\afterruleskip}{\bigskipamount} %%% Minimum distance between a rule and its label \newlength{\labelminsep} \setlength{\labelminsep}{0.2em} %%% The ``typical'' width of the column of labels: labels are allowed %%% to project further to the left if necessary; the rules will be %%% centered in a column of width \linewidth - \labelcolwidth \newdimen\labelcolwidth %%% Set the label column width by providing a ``typical'' label -- %%% i.e. a label of average length \newcommand{\typicallabel}[1]{ \setbox \@tempboxa \hbox{\inflabel{#1}} \labelcolwidth \wd\@tempboxa } \typicallabel{} %%% A flag controlling generation of index entries \newif \ifindexrules \indexrulesfalse %%% Allocate some temporary registers \newbox\labelbox \newbox\rulebox \newdimen\ruledim \newdimen\labeldim %%% Put a rule and its label on the same line if this can be done %%% without overlapping them; otherwise, put the label on the next %%% line. Put a small amount of vertical space above and below. \newcommand{\layoutruleverbose}[2]% {\unvbox\voidb@x % to make sure we're in vmode \vspace{\afterruleskip}% \setbox \rulebox \hbox{$\displaystyle #2$} \setbox \labelbox \hbox{#1} \ruledim \wd \rulebox \labeldim \wd \labelbox %%% Will it all fit comfortably on one line? 
\@tempdima \linewidth \advance \@tempdima -\labelcolwidth \ifdim \@tempdima < \ruledim \@tempdima \ruledim \else \advance \@tempdima by \ruledim \divide \@tempdima by 2 \fi \advance \@tempdima by \labelminsep \advance \@tempdima by \labeldim \ifdim \@tempdima < \linewidth % Yes, everything fits on a line \@tempdima \linewidth \advance \@tempdima -\labelcolwidth \hbox to \linewidth{% \hbox to \@tempdima{% \hfil \box\rulebox \hfil}% \hfil \hbox to 0pt{\hss\box\labelbox}% } \else % No, better put the label on the next line \@tempdima \linewidth \advance \@tempdima -\labelcolwidth \hbox to \linewidth{% \hbox to \@tempdima{% \hfil \box\rulebox \hfil} \hfil}% \penalty10000 \hbox to \linewidth{% \hfil \box\labelbox} \fi \vspace{\afterruleskip}% \@doendpe % use LaTeX's trick of inhibiting paragraph indent for % text immediately following a rule \ignorespaces } % Alternate form, for when we need to save space \newcommand{\layoutruleterse}[2]% {$\displaystyle #2$ \hspace{\labelminsep} #1 } %%% Select low-level layout driver based on \bcprulessavespace flag \newcommand{\layoutrule}[2]{% \ifbcprulessavespace \layoutruleterse{#1}{#2} \else \layoutruleverbose{#1}{#2} \fi } %%% Commands for setting axioms and rules \newcommand{\typesetax}[1]{ \begin{array}{@{}c@{}}#1\end{array}} \newcommand{\typesetrule}[2]{ \frac{\begin{array}{@{}c@{}}#1\end{array}} {\begin{array}{@{}c@{}}#2\end{array}} } %%% Indexing \newcommand{\maybeindex}[1]{\ifindexrules\index{#1@\rn{#1}}\fi} %%% Setting axioms, with or without names \def\infax{\@ifnextchar[{\@infaxy}{\@infaxx}} \def\@infaxx#1{% \ifbcprulessavespace $\typesetax{#1}$ \else $$\typesetax{#1}$$ \fi} \def\@infaxy[#1]{\maybeindex{#1}\@infax{\inflabel{#1}}} \def\@infax#1#2{\layoutrule{#1}{\typesetax{#2}}} %%% Setting rules, with or without names \def\infrule{\@ifnextchar[{\@infruley}{\@infrulex}} \def\@infrulex#1#2{% \ifbcprulessavespace $\typesetrule{#1}{#2}$% \else $$\typesetrule{#1}{#2}$$% \fi} \def\@infruley[#1]{\maybeindex{#1}\@infrule{\inflabel{#1}}} \def\@infrule#1#2#3{\layoutrule{#1}{\typesetrule{#2}{#3}}} %%% Miscellaneous helpful definitions \newcommand{\andalso}{\quad\quad} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%% Commands added by mh for reverse compatibility %%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\unnamedinfax}[1]{\infax{#1}} \newcommand{\unnamedinfrule}[2]{\infrule{#1}{#2}} minlog-4.0.99.20080304/doc/mpcref.tex0000644000175000017500000007636610746604522016064 0ustar barralbarral% $Id: mpcref.tex,v 1.12 2008/01/25 13:30:19 logik Exp $ \documentclass[11pt,a4paper]{article} % for pdftex \usepackage[backref]{hyperref} % set all margins to 1 inch \evensidemargin 0pt \oddsidemargin 0pt \textwidth \paperwidth \addtolength\textwidth{-2in} \topmargin 0pt \addtolength\topmargin{-\headheight} \addtolength\topmargin{-\headsep} \topskip 0pt \textheight \paperheight \addtolength\textheight{-2in} \parindent 0pt \parskip .5\baselineskip plus .5\baselineskip \def\MPC{\textsc{Mpc}} \def\Minlog{\textsc{Minlog}} \title{The \Minlog\ Proof Checker: \MPC} \author{Martin Ruckert} \begin{document} \maketitle \tableofcontents \newpage \section{Lexical Structure} The input of \MPC\ is a stream of characters that usually comes from a regular text file (see Invocation). \MPC\ will group these characters together to form whitespace, comments, punctuation, strings, names, keywords, numbers, or indices. Keywords, punctuation, names, numbers and indices are collectively called tokens. 
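For orientation, here are two lines of input together with the way \MPC\ splits them into tokens (a rough illustration only; the exact rules are given in the remainder of this section, and the particular names are arbitrary):

\begin{verbatim}
PRED . A ;   // keyword "PRED", punctuation ".", name "A", punctuation ";"
A & A3 ;     // name "A", name "&", name "A" with index 3, punctuation ";"
\end{verbatim}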
\paragraph{Whitespace:} You will know whitespace, when you see it. Otherwise, you may look up the function isspace in a scheme revised five report. Whatever this function regards as whitespace, is whitespace. Whitespace is of no significance to \textsc{MPC}---except between a name and an index (see below). Its only purpose is to separate two tokens and to make files more readable. \paragraph{Comments:} Comments are started with \verb\//\ and extend until the end of line or the end of file. Like whitespace, comments separate tokens and can improve readability. \paragraph{Numbers and Indices} Numbers and Indices are both formed from the digits 0, 1, 2, 3, 4, 5, 6, 7, 8, and 9. They form an index, if they follow immediately after a name, or after an underscore \verb/_/, or after a caret \verb/^/. Otherwise, they form a number. \paragraph{Punctuation} The following characters are punctuation: \verb/{/, \verb/}/, \verb/[/, \verb/]/, \verb/(/, \verb/)/, \verb/./, \verb/;/, \verb/,/, and \verb/"/. \paragraph{Strings} Strings start with a ``\verb/"/'' character and end with the following ``\verb/"/'' character or the end of the input. Inside a string any character can be escaped by a preceding backslash ``\verb/\/''. The preceding backslash strips a character of any special meaning and inserts it plainly into the string. This is useful only for the characters that have any special meaning: the quote and the backslash itself. That is, ``\verb/\"/'' will insert a double-quote into the string without terminating it, and ``\verb/\\/'' will insert a single backslash into the string. \paragraph{Names} Names are formed either from letters only or from special characters only. All characters except whitespace, digits, letters, and punctuation are considered special characters. Names can be indexed by a number. There must be no whitespace between name and index. E.g \verb/f15/ is the function $f_{15}$ whereas \verb/f 15/ is the function $f$ applied to the number $15$. Examples of names are ``\verb/hello/'', ``\verb/sigma/'', ``\verb/==>/'', and ``\verb/|-/''. The following strings are {\bf not} names: ``\verb/x_i/'', ``\verb/(;-)/'', or ``\verb/id3tag/''. Note: All names of types, variables, functions, or pre\-di\-cates must be declared before they can be used. Once an alphabetic name is declared, it provides an infinite sequence of objects (types, variables, functions, predicates) using indexing. \paragraph{Keywords} Keywords are predefined names with a fixed, build in meaning. All the keywords that \MPC\ knows about are explained below. \section{Syntax} The syntax of \MPC\ uses as main ingredients formulas and terms. Since \MPC\ is a front-end to the \Minlog\ system it uses the \Minlog\ syntax to specify formulas and terms. To find out how to write formulas and terms you can either rely on your intuition or consult the \Minlog\ manual. Here we describe only the syntax that is particular to \MPC. \MPC\ is designed to process normal text files. Every text file starts with the keyword ``\texttt{MPC}'' followed by a semicolon ``\texttt{;}''. Then a list of commands follows. \subsection{Commands} A command can be one of the following: \begin{itemize} \item \texttt{LOAD} \texttt{"}\textit{Filename}\texttt{"} \texttt{;}\\ reads in the given file as \textsc{Scheme} code. Every possible effect can be obtained in this way by writing appropriate \textsc{Scheme} code. If the file does not exist in the current directory, it is searched for in a directory of library files. 
\item \texttt{INCLUDE} \texttt{"}\textit{Filename}\texttt{"} \texttt{;}\\ reads in the given file as \MPC\ code. It can be used to store e.g.\ a collection of definitions and axioms in a file and load them into several proofs. If the file does not exist in the current directory, it is searched for in a directory of library files. Currently two libraries exist: nat.mpc gives definitions of natural numbers, and list.mpc gives definitions for lists. \item \texttt{SCHEME} \texttt{"}\textit{Scheme Expression}\texttt{";}\\ reads the given string with the \textsc{Scheme} function \texttt{read} and supplies the result as an argument to the \textsc{Scheme} function \texttt{eval}, in effect evaluating the given string like regular \textsc{Scheme} code. Note that inside a string doublequotes need to be escaped by a preceding backslash. For example: \verb/SCHEME "(display \"hello world!\")";/. \item \texttt{PROOF;} starts a proof. This will set the list of known facts to an empty list. \MPC\ will forget all previously proved formulas. Two variations exist: \texttt{CLASSIC PROOF} starts a proof with the rule of Stability enabled---that is you can conclude $A$ from $\lnot\lnot A$. \texttt{INTUITIONISTIC PROOF} starts a proof with the special rule of Ex-Falso-Quodlibet enabled --- that is you can conclude any $A$ from $\bot$. \item \texttt{END;} terminates a proof (currently optional). \item A declaration, \item an assumption, \item a claim, \item a block, or \item a syntax specification as detailed below. \end{itemize} \subsection{Declarations} Declarations are used to tell \MPC\ about types, variables, functions, and predicates. \paragraph{Types:} Types come as type variables, as simultaneously defined finite algebras, or as composed types. Only variable types and algebra types need to be declared. Type operators (mostly \verb/=>/) can be freely used to construct composed types. To declare a type variable the keyword ``\texttt{TYPE}'' is followed by a dot ``\texttt{.}'', a list of names, and finally a semicolon ``\texttt{;}''. It will make the given names type variables. For example: \texttt{TYPE . rho sigma tau;} will make \texttt{rho}, \texttt{sigma}, and \texttt{tau} new type variables. There is one predefined type variable \texttt{alpha} that exists, for internal reasons, right from the start. By adding an index to a variable name, an infinite number of different type variables can be obtained, e.g.\ \texttt{alpha5}, \texttt{tau123}, or \texttt{sigma1}. Algebra types are described in the next section. \paragraph{Algebra Types:} Objects of a free algebra type are build by applying an appropriate constructor to already existing objects. For example, the free algebra of natural numbers can be defined with the constructors \texttt{zero} and \texttt{successor}. \texttt{zero} is a constructor of empty arity, that is, it is applied to no object and yields a natural number, and \texttt{successor} is applied to a natural number and yields again a natural number. Hence ``\texttt{successor zero}'' is for example a natural number. To specify an algebra, the name of the algebra together with the name of the constructors and their arity has to be given. The syntax is: \noindent \texttt{ALGEBRA} \textit{algebra-name} \verb/{/ \verb/{/ \verb/{/\\ \texttt{\ \ \textit{constructor-type} . \textit{constructor-name} ; }\\ \texttt{\ \ \textit{constructor-type} . 
\textit{constructor-name} ; }\\ \ldots\\ \ \verb/}/\ \texttt{;} For example to define the algebra of natural numbers, one can write \noindent \texttt{ALGEBRA nat}\ \verb/{/\\ \texttt{\ \ nat . zero ;} \\ \texttt{\ \ nat => nat . successor ;} \\ \ \verb/}/ Algebras can have type parameters. For example a list type, might have the type of the list elements as a parameter. To declare an algebra with type parameters, the number of type parameters has to be given after the algebra name. In this case the type variables \texttt{alpha0}, \texttt{alpha1}, \ldots can be used in the \textit{constructor-types}. For example to define the free algebra of lists of elements of type \texttt{alpha0} one can write: \noindent \texttt{ALGEBRA list 1}\ \verb/{/\\ \texttt{\ \ list . nil ;} \\ \texttt{\ \ alpha0 => list => list . cons ;} \\ \ \verb/}/\ \texttt{;} Finally, several algebras can be defined simultaneously by listing all their names after the keyword \texttt{ALGEBRA}. As a last example we construct an algebra of labeled trees, where labels can be either of type \texttt{alpha0}, or of type \texttt{alpha1}, or again labeled trees. \noindent \texttt{ALGEBRA tree label 2}\ \verb/{/\\ \texttt{alpha0 => label . first ;}\\ \texttt{alpha1 => label . second ;}\\ \texttt{tree => label . third ;} \noindent \texttt{tree . empty;}\\ \texttt{label => tree => tree => tree . node ;}\\ \ \verb/}/\ \texttt{;} \paragraph{Variables:} Variables are declared by specifying their type. The syntax is: \noindent \textit{type} \texttt{.} \textit{variablenames} \texttt{;} For example \noindent \texttt{nat => nat . f g ; } defines \texttt{f} and \texttt{g} as function variables mapping natural numbers to natural numbers. Similarly \texttt{f3} or \texttt{g2} are such function variables. \paragraph{Functions:} Function constants have a name and a type (like function variables) but in addition have fixed computational rules or rewrite rules attached to them. These rules are used automatically by the prover to find out whether two terms are equal. A function declaration starts with the keyword \texttt{FUNCTION} followed by the target type of the function, a dot, the name of the function and a list of argument types. After the name follows a list of rules enclosed in braces. A semicolon terminates the function declaration. As a special convenience, syntax declarations (see below) can be used inside the rule list. A rule has the form \textit{term} \texttt{->} \textit{term} \texttt{;} It states that the left term can be replaced by the right term. Of course, the principal operator of the left term has to be the function currently being defined. Furthermore each rule must have a unique left hand side. More rules can be added if prefixed with the keyword \texttt{REWRITE}. These additional rules are applied more carefully (avoiding rewrite loops) and hence are more flexible but slower. The following example illustrates this: \begin{tabbing} \tt SYNTAX ++ PREFIXOP successor ; \\ \tt FUNCTION nat . plus (nat nat) \\ \tt \verb/{/ \= \\ \>\tt SYNTAX + ADDOP plus ;\\ \\ \>\tt \phantom{REWRITE} n + zero -> n ;\\ \>\tt \phantom{REWRITE} n + ++m -> ++(n + m) ;\\ \>\tt REWRITE zero + n -> n;\\ \>\tt REWRITE ++n + m -> ++(n + m) ;\\ \>\tt REWRITE n + ( m+ k) -> (n + m) + k ; \\ \tt \verb/}/;\\ \end{tabbing} Note, that syntax declarations, explained below, are allowed inside a function declaration to enable the use of a more convenient syntax, e.g.\ infix notation, if so desired. Functions, by default, are total functions. 
It is possible to define partial functions by adding the keyword \texttt{PARTIAL} in front of the keyword \texttt{FUNCTION}. \paragraph{Predicates:} To declare a predicate the keyword \texttt{PRED} is used followed by a list of types of the arguments (if any), a dot, a list of predicate names, and finally a semicolon. For example: \noindent \texttt{PRED . A B ; } defines two predicate variables \texttt{A} and \texttt{B} (as well as \texttt{A0}, \texttt{A1}, \ldots). They take no arguments and can be used like propositional variables. \noindent \texttt{PRED nat nat . R ; } defines a binary predicate over natural numbers (a relation), and \noindent \texttt{PRED nat=>nat . P ; } defines a unary predicate over functions of natural numbers. \subsection{Assumptions and Claims} An assumption is a formula followed by a dot ``\texttt{.}'' and a claim is a formula followed by a semicolon ``\texttt{;}''. In the first case the formula is added to the list of known formulas, in the second case an attempt is made to prove the formula form the formulas already known to be true. Formulas are defined inductively: Every predicate variable \texttt{A} is a formula. If \texttt{M} and \texttt{N} are formulas then \texttt{M \& N} (conjunction) and \texttt{M -> N} (implication) are formulas. If \texttt{M} is a formula and \texttt{x} is a variable, then \texttt{all x M} and \texttt{ex x M} are formulas. Examples of formulas and details of how proofs work can be found below. For example: \noindent \texttt{PRED . A ; // A is a proposition }\\ \texttt{A. // we assume A holds }\\ \texttt{A \& A; // we claim that A \& A can be proven} \subsection{Blocks} Blocks are used to construct conditional proofs. They start with \verb\{\ and end with \verb\}\. After the opening brace follows either a formula or a variable followed by a dot. This is the local formula or variable of this block. Next a block contains a non empty sequence of claims or other blocks. When the block is closed it proves an implication --- in case of a local formula --- or an all quantified formula --- in case of a local variable. The exact usage is explained below. Here an example may suffice: \begin{tabbing} \tt PRED alpha . P ; // P is a predicate \\ \tt alpha . x ; // x is a variable of type alpha \\ \tt \verb/{/ \=\tt x0 . // assume x0 is given \\ \tt \>\tt \verb/{/ \= \tt P x0 . // assume further that P of x0 holds \\ \tt \> \>\tt P x0 ; // then P of x0 holds \\ \tt \>\tt \verb/}/ // this proves P x0 -> P x0 \\ \tt \verb/}/ // this proves all x0 . P x0 -> P x0 \\ \end{tabbing} \subsection{Syntax Declarations} In mathematics, functions are often written in infix, prefix, or postfix notation. Instead of writing \texttt{plus x y} --- the function \texttt{plus} applied to \texttt{x} and \texttt{y} --- we like to write \texttt{x + y}. To facilitate this \MPC\ has syntax declarations. A syntax declaration starts with the keyword \texttt{SYNTAX} followed by the name of the new operator, followed by the tokentype, followed by a term, and a semicolon. A tokentype is one of the following (in order of increasing binding strength): \texttt{PAIROP}, \texttt{IMPOP}, \texttt{OROP}, \texttt{ANDOP}, \texttt{RELOP}, \texttt{ADDOP}, \texttt{MULOP}, \texttt{PREFIXOP}, \texttt{POSTFIXOP}, or \texttt{CONST}. A \texttt{PAIROP} and \texttt{IMPOP} are right associative, a \texttt{RELOP} is not associative, and all other infix operators are left associative. 
After the syntax declaration, any term containing the new operator as main connective is replaced by the term given in the syntax declaration applied to the arguments of the operator. For example: \noindent \texttt{boole . a b; // a and b are booleans. }\\ \texttt{boole=>boole=>boole . f ; // f is a function. }\\ \texttt{SYNTAX | ADDOP f0 ; // we write | as infix operator for f0.} \noindent \texttt{all a . true|a . // for all a ((f0 true) a) holds.}\\ \texttt{all a,b . a|b -> b|a. // f0 is commutative.}\\ \texttt{false|false -> bot. // ((f0 false) false) implies bot.} Syntax declarations might be parameterized with type variables \texttt{alpha0}, \texttt{alpha1}, \ldots If the defining term contains one of these type variables, the types of the actual arguments are matched against the type of the operator to instantiate the type variables. If the types match, the syntax declaration is used. Further, the same operator might be redefined in several syntax declarations as long as all of these declarations use the same tokentype. Multiple declarations are tested in the order declared and the first matching declaration is used. Note: Syntax declarations are allowed inside a function declaration. \section{Proofs} The \Minlog\ Proof Checker is able to check proofs in ``natural deduction'' style. It maintains a list of formulas, called the context, which are assumed or known or proven to be true. Initially this list is empty; using the keyword \texttt{PROOF;} it can be reset to an empty list at any time to start a new proof. There are only two methods to add a formula to the context: First, one can assume the formula by just stating it and putting a dot behind it. This is called an assumption. Example: \begin{verbatim} PRED . A B; // A and B are propositional variables A. // Let's assume A holds A -> B. // Let's assume A implies B \end{verbatim} Second, one can prove a formula from other formulas already known --- i.e.\ fformulas already part of the context --- using the rules of natural deduction. The claim that a formula can be proved is expressed by stating the formula followed by semicolon. This is called a claim. \MPC\ then will check whether there are indeed formulas in the context, which prove the new formula using exactly one rule of natural deduction. If that is not possible, \MPC\ will start a limited proof search trying to obtain a more complicated proof of the claimed formula. If a proof is found this is indicated in an appropriate warning message. It tells the user that the formula is indeed provable, but not with a single step. If the proof search does not discover a proof, \MPC\ will simply assume the formula and continue. It will output a corresponding error message, and it should be clear that the proof has still a gap at this point. Whenever a formula is added to the context, it receives a unique number and \MPC\ will use this number later to refer to this formula in its output. A typical proof will first define the necessary types, variables, functions and predicates to establish the language of the theory, then it states a list of assumptions made (these are the axioms of the theory) and finally it starts to make claims, adding one formula at a time to the pool of knowledge (the context) available for the theory. In the end it will conclude with the final formula, a theorem of the theory. Often the language of the theory and its axioms are put into include files, to be able to conveniently load them before starting a proof. 
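To fix ideas, a complete input file following this pattern might look as below. This is only a minimal sketch: the formulas \texttt{A} and \texttt{A -> B} stand in for the axioms of an actual theory, and the include file is needed only if its definitions are used.

\begin{verbatim}
MPC;                 // every input file starts like this
INCLUDE "nat.mpc";   // definitions and axioms kept in a library file

PROOF;               // start a new proof with an empty context
PRED . A B;          // the language: two propositional variables
A.                   // an axiom (an assumption, terminated by ".")
A -> B.              // another axiom
B;                   // a claim (terminated by ";"), checked by MPC
END;
\end{verbatim}

Here the final claim \texttt{B;} follows from the two assumptions by a single application of implication elimination, so \MPC\ accepts it in one step.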
In the following sections, we will discuss all the proof rules of natural deduction, one at a time. \subsection{Simple Rules} To be applicable, these rules just require certain formulas to be already in the context. \paragraph{Trivial Proofs} If a formula, after normalization, is the same as a formula in the context, it is proven by identity. Likewise, if a formula, by normalization, reduces to \texttt{True}, it is proven. \paragraph{And Elimination:} If a formula of the form $A \land B$ is in the context, it is possible to derive either $A$ or $B$ in one step. Example: \begin{verbatim} A & B. // 0 assumed. A; // OK, 1 proved by and-elim-left from 0 B; // OK, 2 proved by and-elim-right from 0 \end{verbatim} \paragraph{And Introduction:} If two formulas $A$ and $B$ are part of the context, it is possible to derive $A \land B$ in one step. Example: \begin{verbatim} A. // 0 assumed. B. // 1 assumed. A & B; // OK, 2 proved by and-intro from 0 and 1 \end{verbatim} \paragraph{Implication Elimination:} If an implication $A \to B$ and its condition $A$ are part of the context, it is possible to derive the conclusion $B$ in one step. Example: \begin{verbatim} A -> B. // 0 assumed. A. // 1 assumed. B; // OK, 2 proved by imp-elim from 0 and 1 \end{verbatim} \paragraph{All Elimination:} If an all formula $\forall x\, A x$ is part of the context, it is possible to derive the conclusion $A t$ for any term $t$ of the appropriate type in one step. Example: \begin{verbatim} all x A x. // 0 assumed. A t; // OK, 1 proved by all-elim from 0 using t \end{verbatim} \paragraph{Existential Introduction:} If a formula $A t$ for some term $t$ is part of the context, it is possible to derive $\exists x\, A x$ in one step, where $x$ is a variable of the same type as $t$. Example: \begin{verbatim} A t. // 0 assumed. ex x A x; // OK, 1 proved by ex-intro from 0 using t \end{verbatim} \subsection{Block Rules} Sometimes it is necessary in a proof to temporarily make an assumption only to discard it later again. For example, for proving an implication $A \to B$, one would first assume $A$ holds, and then prove $B$ under this assumption. Once this is done, one can conclude that $A \to B$, and this does no longer depend on the assumption $A$. The assumption $A$ in this example behaves like a local assumption with a limited scope. In programming languages, the usual way to introduce objects with limited scope is a block structure. In \textsc{MPC}, blocks are enclosed in curly braces and introduce exactly one local object, either a formula or a variable. The scope of this local object is its defining block and all blocks nested inside it. \paragraph{Implication Introduction:} As said before, an implication $A \to B$ is proved by assuming $A$ and then proving $B$ under this assumption. Once this is done, one can conclude that $A \to B$. In \textsc{MPC}, one opens a block with the local assumption $A$ and proves inside this block the formula $B$. Immediately after the formula $B$ the block is closed again. After the closing brace of the block, \MPC\ will discard all the formulas added to the context during the block (since these may depend on the assumption $A$) and adds the implication $A \to B$ to the context, $A$ being the local formula of the block and $B$ the last formula of the block. Example: \begin{verbatim} { A t. // 0 assumed. ex x A x; // OK, 1 proved by ex-intro from 0 using t } OK, 2 A t -> ex x A x proved. ex x A x; // ERROR: 3 assumed. Proof not found. 
\end{verbatim} \paragraph{All Introduction:} The proof of a formula with an outer universal quantifier is similar to the proof of an implication: Under the assumption that some $x$ is given, one proves $A x$. This is sufficient to conclude $\forall x\, Ax$. For \textsc{MPC}, the proof consists of a block with a local variable $x$ with the last formula being $A x$. At the end of the block, \MPC\ will discard all the formulas added to the context during the block and adds the formula $\forall x\, Ax$ to the context. Example: \begin{verbatim} { x. // x assumed. { A x. // 0 assumed. A x; // OK, 1 proved by 0 } OK, 2 A x -> A x proved. } OK, 3 all x.A x -> A x proved. \end{verbatim} \paragraph{Existential Elimination:} A proof of a formula $B$ may use an existentially quantified formula $\exists x\, Ax$. It typically proceeds like this: If we know that $\exists x\, Ax$, let us assume we have such an $x$, call it $x_0$, such that $A x_0$ holds,\ldots and from this the proof continues to prove the formula B. This then constitutes a proof of $B$ from $\exists x\, Ax$ under the side condition that the $x_0$ is not a free variable of $B$. This proof can be formulated for \MPC\ in exactly the same fashion as outlined above using two nested blocks. The first block introduces the local variable $x_0$ and the second block the local assumption $A x_0$. Once the formula $B$ is proved from this, both blocks are closed. This in effect proves the formula $\forall x. Ax \to B$. This, together with the formula $\exists x\, Ax$, can be used to finally prove $B$ using the rule of existential elimination. Example: \begin{verbatim} ex x.A x & B. // 0 assumed. { x0. // x0 assumed. { A x0 & B. // 1 assumed. B; // OK, 2 proved by and-elim-right from 1 } OK, 3 A x0 & B -> B proved. } OK, 4 all x0.A x0 & B -> B proved. B; // OK, 5 proved by ex-elim from 4 and 0 \end{verbatim} \subsection{Induction} Induction is used to prove that a formula $Ax$ holds for all objects $x$ of a given algebra type $\tau$. This is done by considering all constructors $C_1,\ldots,C_n$ of the algebra that are capable of producing an object of the type in question and proving for each one of them that the formula $A C_i \ldots$ holds provided that the formula holds already for all arguments of $C_i$ of type $\tau$. Once this is done, all these formulas together, prove by the principle of induction, that $\forall x\, Ax$. We illustrate this using the standard example of natural numbers. The free algebra of natural numbers \texttt{nat} is generated from two constructors: \texttt{Zero} of type \texttt{nat} and \texttt{Succ} of type $\texttt{nat}\to\texttt{nat}$. For convenience we write \texttt{0} for \texttt{Zero} and $\mathtt{++}n$ for $\texttt{Succ}\,n$ To prove $\forall n\, An$ by induction, we need to prove first $A \mathtt{0}$ and $\forall n\, An \to A \mathtt{++} n$, then we can conclude the desired result. For example lets prove that $\forall n \exists m \, m = n+1$. We proceed like this: First $1 = \mathtt{0}+1$ and therefore $\exists m\, m=\mathtt{0}+1$. Second, assume $n$ is given and $\exists m\, m = n + 1$ holds. Then there is an $m_0$ with $m_0 = n + 1$, and thus $m_0 +1 = \mathtt{++}n +1$. By existential introduction, we conclude $\exists m\, m= \mathtt{++}n + 1$ and have proved $\forall n\, (\exists m\, m = n + 1 \to \exists m\, m= \mathtt{++}n + 1)$. From these, by induction, we infer: $\forall n \exists m \, m = n+1$. 
The complete proof written for \MPC\ reads: \begin{verbatim} MPC; INCLUDE "nat.mpc"; PROOF; // initializing mpc1 1=0+1; // OK, 0 proved trivial ex m m=0+1; // OK, 1 proved by ex-intro from 0 using 1 { n. // n assumed. { ex m m=n+1. // 2 assumed. { m0. // m0 assumed. { m0=n+1. // 3 assumed. m0+1= ++n+1; // OK, 4 proved by 3 ex m m= ++n+1; // OK, 5 proved by ex-intro from 4 using m0+1 } // OK, 6 m0=n+1 -> ex m m= ++n+1 proved. } // OK, 7 all m0.m0=n+1 -> ex m m= ++n+1 proved. ex m m= ++n+1; // OK, 8 proved by ex-elim from 7 and 2 } // OK, 9 ex m m=n+1 -> ex m m= ++n+1 proved. } // OK, 10 all n.ex m m=n+1 -> ex m m= ++n+1 proved. all n ex m m=n+1; // OK, 11 proved by ind from 10 1 \end{verbatim} \subsection{Proof by Cases} Proof by cases is similar to induction but weaker. Again, we prove that a proposition $Ax$ holds for all objects $x$ of a given algebra type $\tau$. This is done by considering all constructors $C_1,\ldots,C_n$ of the algebra that are capable of producing an object of the type in question and proving for each one of them that the formula $A C_i \ldots$ holds. In contrast to the rule of induction however, no induction hypothesis is available in the proof. A special case is the proof by cases for objects of type \texttt{boole}. Here, the constructors are \texttt{True} and \texttt{False}. We prove $Ax$ by considering the two cases, proving $A\texttt{True}$ and $A\texttt{False}$ to conclude $\forall x\, Ax$. Typically, this formula is then applied to the boolean term $t$ in question to obtaining $At$. Since this process is quite common, in addition to the usual proof by cases rule, an equivalent and more convenient rule is built into \MPC: the proof by boolean cases. To prove any formula $A$, you just have to prove $t \rightarrow A$ and $(\lnot t) \rightarrow A$. \subsection{Intuitionistic and Classical Logic} \MPC\ provides two keywords \texttt{CLASSIC} and \texttt{INTUITIONISTIC} to activate proof rules for classic and intuitionistic logic, respectively. If a proof starts with ``\texttt{INTUITIONISTIC PROOF;}'', the proof rule ``ex falso quodlibet'' is enabled. It allows to conclude from \texttt{bot} any formula whatsoever. If a proof starts with ``\texttt{CLASSIC PROOF;}'' in addition the stronger proof rule of ``stability'' is enabled. It allows to conclude the formula $A$ from a statement of $\lnot \lnot A$. It is an easy exercise to prove $\bot \to A$ from $\lnot \lnot A \to A$, and therefore stability alone would be sufficient to have classical logic. It is however convenient to have ``ex falso quodlibet'' in addition. This weaker rule is always tested first. As an example, we present a proof of the Pierce Formula $((P\rightarrow Q) \rightarrow P) \rightarrow P$. \begin{verbatim} CLASSIC PROOF; PRED . P Q; { (P -> Q) -> P. // 0 assumed. { P -> bot. // 1 assumed. { P. // 2 assumed. bot; // OK, 3 proved by imp-elim from 1 and 2 Q; // OK, 4 proved by EFQ from 3 } // OK, 5 P -> Q proved. P -> Q; // OK, 6 proved by 5 P; // OK, 7 proved by imp-elim from 0 and 6 bot; // OK, 8 proved by imp-elim from 1 and 7 } // OK, 9 (P -> bot) -> bot proved. (P -> bot) -> bot; // OK, 10 proved by 9 P; // OK, 11 proved by Stability from 10 } // OK, 12 ((P -> Q) -> P) -> P proved. END; \end{verbatim} \newpage \appendix \section{Library code} This \MPC\ code may serve as an example to illustrate the concepts of \MPC. \subsection{Natural Numbers} \begin{verbatim} MPC; ALGEBRA nat 0 { nat=>nat . Succ ; nat . Zero ; }; nat . 
n m k; // to use numbers we have to provide scheme code // converting numbers to terms using internals of Minlog SCHEME "(define (make-numeric-term n) (if (= n 0) (pt \"Zero\") (make-term-in-app-form (pt \"Succ\") (make-numeric-term (- n 1)))))" ; SCHEME "(define (is-numeric-term? term) (or (and (term-in-const-form? term) (string=? \"Zero\" (const-to-name (term-in-const-form-to-const term)))) (and (term-in-app-form? term) (let ((op (term-in-app-form-to-op term))) (and (term-in-const-form? op) (string=? \"Succ\" (const-to-name (term-in-const-form-to-const op))) (is-numeric-term? (term-in-app-form-to-arg term)))))))"; SCHEME "(define (numeric-term-to-number term) (if (equal? term (pt \"Zero\")) 0 (+ 1 (numeric-term-to-number (term-in-app-form-to-arg term)))))"; SYNTAX ++ PREFIXOP Succ; FUNCTION nat . Plus(nat nat) { SYNTAX + ADDOP Plus; n + 0 -> n; n + ++m -> ++(n + m); REWRITE 0 + n -> n; REWRITE ++n + m -> ++(n + m); REWRITE n + (m + k) -> n + m + k; }; FUNCTION nat . Times(nat nat) { SYNTAX * MULOP Times; n * 0 -> 0; n * ++m -> (n*m)+n; REWRITE 0*n -> 0; REWRITE ++n*m -> (n*m)+m; REWRITE n * (m * k) -> n * m * k; }; FUNCTION boole . Less(nat nat) { SYNTAX < RELOP Less; n < 0 -> False; 0 < ++n -> True; ++n < ++m -> n list => list . Cons ; }; // a generic variable of type (list alpha) (list alpha) . l; SYNTAX :: PAIROP (Cons alpha); SYNTAX : POSTFIXOP [alpha] alpha ::(Nil alpha); // example x :: y :: z : FUNCTION list alpha => list alpha . ListAppend (list alpha) { SYNTAX :+: PAIROP (ListAppend alpha); (ListAppend alpha)(Nil alpha) -> [l_2]l_2; (ListAppend alpha)(alpha :: l1) -> [l2] alpha::(l1:+:l2); }; INCLUDE "nat.mpc"; FUNCTION nat . ListLength(list alpha) { SYNTAX lh PREFIXOP (ListLength alpha); lh (Nil alpha) -> 0 ; lh (alpha :: l) -> ++ lh l; REWRITE lh (l1 :+: l2) -> lh l1 + lh l2; }; \end{verbatim} \end{document} %%% Local Variables: %%% mode: latex-math %%% End: minlog-4.0.99.20080304/doc/mlcf.tex0000644000175000017500000117774510746604522015535 0ustar barralbarral% $Id: mlcf.tex,v 1.30 2008/01/25 13:30:18 logik Exp $ \documentclass[11pt,a4paper]{amsbook} % \documentclass[11pt,draft,fleqn,a4paper]{amsbook} \usepackage{amsmath,amssymb,bussproofs,enumerate,infrule,latexsym,verbatim} % \usepackage{showidx}% prints index entry in the margin % \usepackage[notcite,notref]{showkeys}% prints labels in the margin % for pdftex % \usepackage[backref]{hyperref} \makeindex % \listfiles% causes the log file to list all the files used in typesetting \theoremstyle{plain} % \newtheorem{theorem}{Theorem}[chapter] \newtheorem{theorem}{Theorem}[section] \newtheorem*{theorem*}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem*{lemma*}{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem*{corollary*}{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem*{proposition*}{Proposition} \newtheorem{claim}[theorem]{Claim} \newtheorem*{claim*}{Claim} \newtheorem*{namedtheorem}{\theoremname} \newenvironment{named}[1]{\renewcommand{\theoremname}{#1} \begin{namedtheorem}} {\end{namedtheorem}} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem*{definition*}{Definition} \newtheorem{notation}[theorem]{Notation} \newtheorem*{notation*}{Notation} \newtheorem{example}[theorem]{Example} \newtheorem*{example*}{Example} \newtheorem{examples}[theorem]{Examples} \newtheorem*{examples*}{Examples} \newtheorem{exercise}[theorem]{Exercise} \newtheorem*{exercise*}{Exercise} \newtheorem{exercises}[theorem]{Exercises} \newtheorem*{exercises*}{Exercises} 
\theoremstyle{remark} \newtheorem{remark}[theorem]{Remark} \newtheorem*{remark*}{Remark} \newtheorem*{acknowledgement*}{Acknowledgement} % minlog.mac contains the macros for all tex-files of the minlog % documentation in order to avoid code duplication and different % notations. \input{minlog.mac} \renewcommand*{\bs} {\mathit{b}\!\mathit{s}} \renewcommand*{\Eq} {\mathsf{Eq}} \renewcommand*{\FV} {\mathsf{FV}} \renewcommand*{\glossary} {\index} \renewcommand*{\mi} {\mathrm{[}\textbf{mi}\mathrm{]}} \renewcommand*{\qedsymbol} {{\ \fini\par}} \renewcommand*{\termProdElimLeft} {\mathsf{Fst}} \renewcommand*{\termProdElimRight}{\mathsf{Snd}} \renewcommand*{\termSumIntroLeft} {\mathsf{Inl}} \renewcommand*{\termSumIntroRight}{\mathsf{Inr}} \renewcommand*{\thesection} {\thechapter.\arabic{section}} %Urban \renewcommand*{\thesubsection}{\thesection.\arabic{subsection}}%Urban \renewcommand*{\thetheorem} {\thesection.\arabic{theorem}} %Urban \renewcommand*{\thetable} {\thesection.\arabic{table}} %Urban \allowdisplaybreaks[4] \begin{document} \frontmatter \begin{titlepage} \vglue 5 true cm \centerline{\huge Minimal Logic for Computable Functionals} \bigskip\bigskip \centerline{Helmut Schwichtenberg} \vglue 10 true cm \centerline{Mathematisches Institut der Universit\"at M\"unchen} \centerline{\today} \end{titlepage} \tableofcontents % \listoftables % \chapter*{Preface}\mylabel{C:Preface} \mainmatter \chapter{Minimal Logic} \mylabel{C:Logic} One of our goals is to study program extraction from proofs. We shall cover the theoretical foundations of the subject, but also try to gain some practical experience. For the latter we will make use of the system \textsc{Minlog}. \textsc{Minlog} is intended to reason about computable functionals, using mi\-ni\-mal logic. It is an interactive prover with the following features. \begin{itemize} \item Proofs are treated as first class objects: they can be normalized and then used for reading off an instance if the proven formula is existential, or changed for program development by proof transformation. \item To keep control over the complexity of extracted programs, we follow Kreisel's proposal and aim at a theory with a strong language and weak existence axioms. It should be conservative over (a fragment of) arithmetic. \item \textsc{Minlog} is based on minimal rather than classical or intuitionistic logic. This more general setting makes it possible to implement program extraction from classical proofs, via a refined $A$-translation (cf.\ \cite{BergerBuchholzSchwichtenberg02}). \item Constants are intended to denote computable functionals. Since their (mathematically correct) domains are the Scott-Ershov partial continuous functionals, this is the intended range of the quantifiers. \item Variables carry (simple) types, with free algebras as base types. The latter need not be finitary (so we allow e.g.\ countably branching trees), and can be simultaneously generated. Type parameters (ML style) are allowed, but we keep the theory predicative and disallow type quantification. Also predicate variables are allowed, as placeholders for formulas (or more precisely, comprehension terms). \item To simplify equational reasoning, the system identifies terms with the same normal form. A rich collection of rewrite rules is provided, which can be extended by the user. Decidable predicates are implemented via boolean valued functions, hence the rewrite mechanism applies to them as well. 
\end{itemize} \input{acknow} \section{Natural Deduction} \mylabel{S:NatDed} The system we pick for the representation of proofs is Gentzen's\index{Gentzen} natural deduction, from \cite{Gentzen34}. Our reasons for this choice are twofold. First, as the name says this is a \emph{natural} notion of formal proof, which means that the way proofs are represented corresponds very much to the way a careful mathematician writing out all details of an argument would go anyway. Second, formal proofs in natural deduction are closely related (via the \indexentry{Curry-Howard correspondence}) to terms in typed $\lambda$-calculus. This provides us not only with a compact notation for logical derivations (which otherwise tend to become somewhat unmanagable tree-like structures), but also opens up a route to applying the computational techniques which underpin $\lambda$-calculus. \subsection{First Order Languages} \mylabel{SS:FOL} For first order languages we use the standard language containing $\to$, $\land$, $\forall$, $\ex$% \index{AAAlog@$\to\land\ex$}\label{not:prop} as primitive logical operators. We assume countably infinite supplies of individual variables, $n$-place predicate (or relation) symbols (constants or variables) for all $n \in \D{N}$, symbols (again constants or variables) for $n$-ary functions for all $n \in \D{N}$. 0-place predicate symbols are also called propositional symbols. 0-argument function symbols are also called (individual) constants. The language will \emph{not}, unless stated otherwise, contain = as a primitive. \emph{Atomic} formulas\glossary{atomic formula}\glossary{formula!atomic} are formulas of the form $Rt_1 \dots t_n$, $R$ a predicate symbol, $t_1, \dots, t_n$ individiual terms. For formulas which are either atomic or $\falsum$ we use the term \emph{prime} formula% \glossary{prime formula}\glossary{formula!prime}. We use certain categories of symbols, possibly with sub- or superscripts or primed, as metavariables for certain syntactical categories (locally different conventions may be introduced): \begin{itemize} \item $x,y,z,u,v,w$ for individual variables; \item $f,g,h$ for function symbols; \item $c, d$ for individual constants; \item $t,s,r$ for terms; \item $P,Q$ for atomic formulas; \item $R$ for predicate symbols; \item $A,B,C,D,E,F$ for arbitrary formulas in the language. \end{itemize} We introduce abbreviations: \begin{alignat*}{2}%\label{not:defop} &\neg A &&:= A \to \falsum, \\ &A \leftrightarrow B &&:=(A \to B) \land (B \to A), \\ &\excl x A &&:= \neg \forall x \neg A \quad\hbox{(the classical existential quantifier)}. \end{alignat*}In writing formulas we save on parentheses by assuming that $\forall, \ex, \neg$ bind more strongly than $\land$, and that in turn $\land$ binds more strongly than $\to, \leftrightarrow$. Outermost parentheses are also usually dropped. Thus $A \land \neg B \to C$ is read as $((A \land (\neg B))\to C)$. In the case of iterated implications we sometimes use the short notation \[ A_1 \to A_2 \to \dots A_{n-1} \to A_n \quad\hbox{for} \quad A_1 \to (A_2 \to \dots (A_{n-1} \to A_n)\dots). \] To save parentheses in quantified formulas, we use a mild form of the \indexentry{dot notation}: a dot immediately after $\forall x$ or $\ex x$ makes the scope of that quantifier as large as possible, given the parentheses around. So $\forall x.A\to B$ means $\forall x(A\to B)$, not $(\forall x A) \to B$. 
We also save on parentheses by writing e.g.\ $Rxyz$, $Rt_0t_1t_2$ instead of $R(x,y,z)$, $R(t_0,t_1,t_2)$, where $R$ is some predicate symbol. Similarly for a unary function symbol with a (typographically) simple argument, so $fx$ for $f(x)$, etc. In this case no confusion will arise. But readability requires that we write in full $R(fx,gy,hz)$, instead of $Rfxgyhz$. \subsection{Natural Deduction} \mylabel{SS:NatDed} We give an inductive definition of derivation terms in Table~\ref{Tab:minNat}, where for clarity we have written the corresponding derivations to the left. \begin{table} \label{Tab:minNat} \begin{center} \begin{tabular}{|c|c|} \hline & \\ derivation & term \\ &\\ \hline & \\ $u \colon A$ & $u^A$ \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A$} \AxiomC{$\phantom{N} \mid N$} \noLine \UnaryInfC{$B$} \RightLabel{$\andI$} \BinaryInfC{$A\land B$} \DisplayProof & $\langle M^A,N^B\rangle^{A\land B}$ \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A\land B$} \RightLabel{$\andEzero$} \UnaryInfC{$A$} \DisplayProof \quad \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A\land B$} \RightLabel{$\andEone$} \UnaryInfC{$B$} \DisplayProof & $(M^{A\land B} 0)^A$\qquad $(M^{A\land B} 1)^B$ % $\pi_0(M^{A\land B})^A$\qquad $\pi_1(M^{A\land B})^B$ \\ &\\ \hline & \\ \AxiomC{[$u \colon A$]} \noLine \UnaryInfC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$B$} \RightLabel{$\impI \, u$} \UnaryInfC{$A\to B$} \DisplayProof & $(\lambda u^A M^B)^{A\to B}$ \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A\to B$} \AxiomC{$\phantom{N} \mid N$} \noLine \UnaryInfC{$A$} \RightLabel{$\impE$} \BinaryInfC{$B$} \DisplayProof & $(M^{A\to B} N^A)^B$ \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A$} \RightLabel{$\allI \, x$\quad\hbox{(with var.cond.)}} \UnaryInfC{$\forall x A$} \DisplayProof & $(\lambda xM^A)^{\forall x A}$ (with var.cond.) \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$\forall x A$} \AxiomC{$t$} \RightLabel{$\allE$} \BinaryInfC{$\subst{A}{x}{t}$} \DisplayProof & $(M^{\forall x A}t)^{\subst{A}{x}{t}}$ \\ &\\ \hline \end{tabular} \end{center} \caption{Derivation terms for $\land$, $\to$ and $\forall$} \end{table} Notice that we have left out the standard connectives $\ex$ and $\lorc$, although they could easily be included, with the rules given below. The reason for this omission is that for simplicity we want our derivation terms to be pure lambda terms formed just by lambda abstraction, application, pairing and projections. This would be violated by the rules for $\ex$ and $\lorc$, which require additional constructs. \begin{table} \label{Tab:minNatorex} \begin{center} \begin{tabular}{|c|c|} \hline & \\ derivation & term \\ &\\ \hline & \\ \AxiomC{$t$} \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$\subst{A}{x}{t}$} \RightLabel{$\excI$} \BinaryInfC{$\ex x A$} \DisplayProof & $\bigl(\excI_{x,A} t M^{\subst{A}{x}{t}} \bigr)^{\ex x A}$ \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$\ex x A$} \AxiomC{$[u \colon A]$} \noLine \UnaryInfC{$\phantom{N} \mid N$} \noLine \UnaryInfC{$B$} \RightLabel{$\excE \, u$ (var.cond.)} \BinaryInfC{$B$} \DisplayProof & $\bigl(%\excE_{x,A,B} M^{\ex x A}(u^A. N^B)\bigr)^B$ (var.cond.) 
\\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A$} \RightLabel{$\orcIzero$} \UnaryInfC{$A\lorc B$} \DisplayProof \quad \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$B$} \RightLabel{$\orcIone$} \UnaryInfC{$A\lorc B$} \DisplayProof & $\bigl(\orcI_{0,B} M^A\bigr)^{A\lorc B}$ \, $\bigl(\orcI_{1,A} M^B\bigr)^{A\lorc B}$ \\ &\\ \hline & \\ \AxiomC{$\phantom{M} \mid M$} \noLine \UnaryInfC{$A \lorc B$} \AxiomC{$[u \colon A]$} \noLine \UnaryInfC{$\phantom{N} \mid N$} \noLine \UnaryInfC{$C$} \ \AxiomC{$[v \colon B]$} \noLine \UnaryInfC{$\phantom{K} \mid K$} \noLine \UnaryInfC{$C$} \RightLabel{$\orcE \, u,v$} \insertBetweenHyps{\hskip.3cm} \TrinaryInfC{$C$} \DisplayProof & $\bigl(M^{A \lorc B}(u^A. N^C, v^B. K^C)\bigr)^C$ \\ &\\ \hline \end{tabular} \end{center} \caption{Derivation terms for $\ex$ and $\lorc$} \end{table} In spite of this omission we can use $\ex$ and $\lorc$ in our logic, if we allow appropriate axioms as constant derivation terms, e.g.\ for $\ex$ \begin{align*} \excI_{x,A} \colon& \forall x. A\to \ex x A\\ \excE_{x,A,B} \colon& \ex x A\to(\forall x.A\to B)\to B \end{align*} with the usual proviso $x\notin \FV(B)$. For $\lorc$ we could introduce similar axioms; however, we do not do so here, since in the presence of e.g.\ the booleans we can define $\lorc$ from $\ex$ via \[ A \lorc B :\equiv \ex p. (p = \true \to A) \land (p = \false \to B). \] Notice that there is one price to pay in this approach: derivations in normal form are not as normal as they could be. In particular, in the presence of the constants $\excI_{x,A}$ and $\excE_{x,A,B}$ the subformula property clearly is weaker than than it would be with the $\ex, \lorc$-rules and \emph{permutative conversion}\index{conversion!permutative}: permute an elimination immediately following an $\ex, \lorc$-rule over this rule to the minor premise. \section{Embedding Classical and Intuitionistic Logic} \mylabel{S:Class} In minimal logic all propositional symbols play the same role. We now distinguish a special propositional symbol: $\falsum$, to be read \inquotes{falsum}\index{falsum}. We then obtain classical and intuitionistic logic by allowing appropriate additional assumptions. \subsection{Ex-Falso-Quodlibet and Stability} \mylabel{SS:EfqStab} To obtain \emph{intuitionistic logic}\index{logic!intuitionistic} we use as additional assumptions the \emph{ex-falso-quodlibet}\index{ex-falso-quodlibet} formulas $\Efq_R$ for every predicate symbol $R$ different from $\falsum$: \[ \forall \vec{x}. \falsum \to R \vec{x}. \eqno(\Efq_R) \] Similarly one obtains \emph{classical logic}\index{logic!classical} by allowing for every predicate symbol $R$ different from $\falsum$ the \indexentry{principle of indirect proof} as an additional assumption, i.e.\ the formula \[ \forall\vec{x}. \neg \neg R \vec{x} \to R \vec{x};\eqno(\Stab_R) \] this formula is also called \indexentry{stability} of $R$. Notice that with $\falsum$ for $R \vec{x}$ both formulas are trivially derivable; e.g.\ for stability we have $\neg \neg \falsum \to \falsum = ((\falsum \to \falsum) \to \falsum) \to \falsum$. The derivation is \[ \AxiomC{$v \colon( \falsum \to \falsum)\to \falsum$} \AxiomC{$u \colon \falsum$} \RightLabel{$\impI u$} \UnaryInfC{$\falsum \to \falsum$} \BinaryInfC{$\falsum$} \DisplayProof \] Let \begin{align*} \Efq &:= \set{\Efq_R}{\hbox{$R$ predicate symbol $\ne \falsum$}}, \\ \Stab &:=\set{\Stab_R}{\hbox{$R$ predicate symbol $\ne \falsum$}}. 
\end{align*} We call the formula $B$ \emph{classically (intuitionistically) derivable} and write $\vdash_c B$ ($\vdash_i B$) if there is a derivation of $B$ from stability assumptions $\Stab_A$ (ex-falso-quodlibet assumptions $\Efq_A$). Similarly we define classical (intuitionistic) derivability from $\Gamma$ and write $\Gamma\vdash_c B$ ($\Gamma\vdash_i B$), i.e. \begin{align*} \Gamma \vdash_i B &\defiff \Gamma \cup \Efq \vdash B, \\ \Gamma \vdash_c B &\defiff \Gamma \cup \Stab \vdash B. \end{align*} \begin{lemma*}[Ex-falso-quodlibet] $\vdash_i \falsum \to A$ for every formula $A$. \end{lemma*} \begin{proof} By induction on $A$ we construct for every formula $A$ a derivation $\C{D}_A$ of $\falsum \to A$. \emph{Case} $A$ atomic formula. Use $\Efq_A$. \emph{Case} $A\land B$. \[ \AxiomC{$\C{D}_A$} \noLine \UnaryInfC{$\falsum \to A$} \AxiomC{$u \colon \falsum$} \BinaryInfC{$A$} \AxiomC{$\C{D}_B$} \noLine \UnaryInfC{$\falsum \to B$} \AxiomC{$u \colon \falsum$} \BinaryInfC{$B$} \BinaryInfC{$A\land B$} \RightLabel{$\impI u$} \UnaryInfC{$\falsum \to A\land B$} \DisplayProof \] \emph{Case} $A \to B$. \[ \AxiomC{$\C{D}_B$} \noLine \UnaryInfC{$\falsum \to B$} \AxiomC{$u \colon \falsum$} \BinaryInfC{$B$} \UnaryInfC{$A \to B$} \RightLabel{$\impI u$} \UnaryInfC{$\falsum \to A \to B$} \DisplayProof \] \emph{Case} $\forall x A$. \[ \AxiomC{$\C{D}_A$} \noLine \UnaryInfC{$\falsum \to A$} \AxiomC{$u \colon \falsum$} \BinaryInfC{$A$} \UnaryInfC{$\forall x A$} \RightLabel{$\impI u$} \UnaryInfC{$\falsum \to \forall x A$} \DisplayProof \] \emph{Case} $\ex x A$. \[ \AxiomC{$x$} \AxiomC{$\C{D}_A$} \noLine \UnaryInfC{$\falsum \to A$} \AxiomC{$u \colon \falsum$} \BinaryInfC{$A$} \BinaryInfC{$\ex x A$} \RightLabel{$\impI u$} \UnaryInfC{$\falsum \to \ex x A$} \DisplayProof \] This concludes the proof. \end{proof} \begin{lemma*}[Stability] For every formula $A$ without $\ex$, $\vdash_c \neg \neg A \to A$. \end{lemma*} \begin{proof} Induction on $A$. In the constructed derivations we omit (for brevity) the introductions at the end. \emph{Case} $A$ atomic formula. Use $\Stab_A$. \emph{Case} $A \land B$. Use $\vdash( \neg \neg A \to A) \to ( \neg \neg B \to B) \to \neg \neg(A \land B) \to A \land B$, which can be derived easily from $\vdash \neg \neg(A \land B) \leftrightarrow (\neg \neg A \land \neg \neg B)$ (Exercise: derive the latter formula). \emph{Case} $A \to B$. Use $\vdash(\neg \neg B \to B) \to \neg \neg( A \to B) \to A \to B$. A derivation is \[ \AxiomC{$u \colon \neg \neg B \to B$} \AxiomC{$v \colon \neg \neg(A \to B)$} \AxiomC{$u_1 \colon \neg B$} \AxiomC{$u_2 \colon A \to B$} \AxiomC{$w \colon A$} \BinaryInfC{$B$} \BinaryInfC{$\falsum$} \RightLabel{$\impI u_2$} \UnaryInfC{$\neg(A \to B)$} \BinaryInfC{$\falsum$} \RightLabel{$\impI u_1$} \UnaryInfC{$\neg \neg B$} \BinaryInfC{$B$} \DisplayProof \] \emph{Case} $\forall x A$. Clearly it suffices to show $\vdash(\neg \neg A \to A) \to \neg \neg \forall x A \to A$. A derivation is \[ \AxiomC{$u \colon \neg \neg A \to A$} \AxiomC{$v \colon \neg \neg \forall x A$} \AxiomC{$u_1 \colon \neg A$} \AxiomC{$u_2 \colon \forall x A$} \AxiomC{$x$} \BinaryInfC{$A$} \BinaryInfC{$\falsum$} \RightLabel{$\impI u_2$} \UnaryInfC{$\neg \forall x A$} \BinaryInfC{$\falsum$} \RightLabel{$\impI u_1$} \UnaryInfC{$\neg \neg A$} \BinaryInfC{$A$} \DisplayProof \] This concludes the proof. \end{proof} \begin{lemma*} $\Gamma \vdash A \imp \Gamma \vdash_i A$ and $\Gamma \vdash_i A \imp \Gamma \vdash_c A$. \end{lemma*} \begin{proof} It suffices to show $\vdash_c \Efq_A$. 
This can be seen as follows; for brevity assume $R$ to be unary.
\[ \AxiomC{$\forall x. \neg \neg Rx \to Rx$} \AxiomC{$x$} \BinaryInfC{$\neg \neg Rx \to Rx$} \AxiomC{$u \colon \falsum$} \RightLabel{$\impI v^{\neg Rx}$} \UnaryInfC{$\neg \neg Rx$} \BinaryInfC{$Rx$} \RightLabel{$\impI u$} \UnaryInfC{$\falsum \to Rx$} \RightLabel{$\allI$} \UnaryInfC{$\forall x. \falsum \to Rx$} \DisplayProof \]
This concludes the proof. \end{proof}
% The constructions in Lemmata~\ref{L:efq} and \ref{L:stab} can easily
% be implemented. In \textsc{Minlog} the commands are
% \texttt{proof-of-stab-at} and \texttt{proof-of-efq-at}.
Note that neither of the two implications can be reversed. Counterexamples are
\begin{align*} &\not\vdash\falsum \to P,\quad\hbox{but}\quad\vdash_i\falsum \to P, \\ &\not\vdash_i ((P \to Q) \to P) \to P,\quad\hbox{but}\quad\vdash_c ((P \to Q) \to P) \to P. \end{align*}
Apart from $\not\vdash_i ((P \to Q) \to P) \to P$ the proofs are easy. However, proving the non-derivability of the Peirce formula $((P \to Q) \to P) \to P$ in intuitionistic logic requires a more careful study of intuitionistic derivability.
\begin{lemma*}[Proof by cases] $\vdash_c(A \to B) \land(\neg A \to B) \to B$. \end{lemma*}
\begin{proof} Let $C$ abbreviate $(A \to B) \land(\neg A \to B)$.
\[ \AxiomC{$\C{D}_{\Stab}$} \noLine \UnaryInfC{$\neg \neg B \to B$} \AxiomC{$w\colon \neg B$} \AxiomC{$u \colon C$} \UnaryInfC{$\neg A \to B$} \AxiomC{$w\colon \neg B$} \AxiomC{$u \colon C$} \UnaryInfC{$A \to B$} \AxiomC{$v \colon A$} \BinaryInfC{$B$} \BinaryInfC{$\falsum$} \RightLabel{$\impI v$} \UnaryInfC{$\neg A$} \BinaryInfC{$B$} \BinaryInfC{$\falsum$} \RightLabel{$\impI w$} \UnaryInfC{$\neg \neg B$} \BinaryInfC{$B$} \DisplayProof \]
where $\C{D}_{\Stab}$ is a derivation provided by the Stability Lemma. \end{proof}
\subsection{Equivalence} \mylabel{SS:Equiv} Call two formulas $A$ and $B$ \emph{equivalent}\index{formulas!equivalent} in minimal (classical, intuitionistic) logic if $\vdash A\leftrightarrow B$ ($\vdash_c A\leftrightarrow B$, $\vdash_i A\leftrightarrow B$).
\begin{lemma*}[Equivalence] Let $\vdash_{mic}\in \{\vdash,\vdash_i,\vdash_c\}$. If $\vdash_{mic} A_1\leftrightarrow A_2$ and $B_2$ is obtained from $B_1$ by replacing one subformula $A_1$ of $B_1$ by $A_2$, then we also have $\vdash_{mic} B_1\leftrightarrow B_2$. \end{lemma*}
\begin{proof} Induction on $B_1$. If all of $B_1$ has been replaced, the claim is obvious. Otherwise $B_1$ must be a composed formula.
\emph{Case} $C_1 \land D_1$. Assume the replacement takes place in $C_1$. We have to show $\vdash_{mic} C_1 \land D_1\leftrightarrow C_2 \land D_1$. $\to$:
\[ \AxiomC{$\C{D}$} \noLine \UnaryInfC{$C_1 \to C_2$} \AxiomC{$C_1 \land D_1$} \UnaryInfC{$C_1$} \BinaryInfC{$C_2$} \AxiomC{$C_1 \land D_1$} \UnaryInfC{$D_1$} \BinaryInfC{$C_2 \land D_1$} \DisplayProof \]
where $\C{D}$ is known by induction hypothesis. $\leftarrow$ is proved similarly.
\emph{Case} $C_1 \to D_1$. Assume the replacement takes place in $C_1$. We have to show $\vdash_{mic}(C_1 \to D_1)\leftrightarrow(C_2 \to D_1)$. $\to$:
\[ \AxiomC{$C_1 \to D_1$} \AxiomC{$\C{D}$} \noLine \UnaryInfC{$C_2 \to C_1$} \AxiomC{$u \colon C_2$} \BinaryInfC{$C_1$} \BinaryInfC{$D_1$} \RightLabel{$\impI u$} \UnaryInfC{$C_2 \to D_1$} \DisplayProof \]
where again $\C{D}$ is known by induction hypothesis. $\leftarrow$ is proved similarly. Assume now that the replacement takes place in $D_1$. We must show $\vdash_{mic}(C_1 \to D_1)\leftrightarrow(C_1 \to D_2)$.
$\to$:
\[ \AxiomC{$\C{D}$} \noLine \UnaryInfC{$D_1 \to D_2$} \AxiomC{$C_1 \to D_1$} \AxiomC{$u \colon C_1$} \BinaryInfC{$D_1$} \BinaryInfC{$D_2$} \RightLabel{$\impI u$} \UnaryInfC{$C_1 \to D_2$} \DisplayProof \]
where again $\C{D}$ is known by induction hypothesis. $\leftarrow$ is proved similarly.
\emph{Case} $\forall xC_1$. We must show $\vdash_{mic}\forall xC_1\leftrightarrow\forall xC_2$. $\to$:
\[ \AxiomC{$\C{D}$} \noLine \UnaryInfC{$C_1 \to C_2$} \AxiomC{$\forall xC_1$} \AxiomC{$x$} \BinaryInfC{$C_1$} \BinaryInfC{$C_2$} \UnaryInfC{$\forall xC_2$} \DisplayProof \]
where again $\C{D}$ is known by induction hypothesis. Observe that $\C{D}$ does not contain free assumptions. $\leftarrow$ is proved similarly.
\emph{Case} $\ex xC_1$. Similar. \end{proof}
\section{Glivenko's Theorem} \mylabel{S:Glivenko} As an illustration of what can be done in intuitionistic propositional logic we prove Glivenko's theorem, which says that every negation that is a classical tautology can be derived intuitionistically.
\subsection{Valuations} \mylabel{SS:Casesneg}
\begin{lemma*} $\vdash (A \to B) \to (\neg A \to B) \to \neg \neg B$. \end{lemma*}
\begin{proof} Assume $\neg B$. Then $\neg A$, hence $B$, hence $\falsum$. \end{proof}
For $\sigma \in \{0,1 \}$ let
\[ A^{\sigma} := \begin{cases} A &\text{if $\sigma = 1$}\\ \neg A &\text{if $\sigma = 0$} \end{cases} \]
A \indexentry{valuation} $v$ is a mapping from propositional variables into \indexentry{truth values}, which are taken here to be $0$ and $1$, for falsity and truth. Call a formula $A$ a \indexentry{tautology} if for every valuation $v$ its truth value $v(A)$ is $1$. For $* \in \{\to, \land \}$ let $\sigma * \tau$ be the truth value given by the well-known truth table for the connective $*$.
\subsection{Intuitionistic Derivability and Valuations} \mylabel{SS:val}
\begin{lemma*} $\vdash_{i} A^{\sigma} \to B^{\tau} \to (A * B)^{\sigma * \tau}$ for $* \in \{\to, \land \}$. \end{lemma*}
\begin{proof} \emph{Case} $\to$.
\begin{align*} &\vdash B \to A \to B \\ &\vdash A \to \neg B \to \neg(A \to B) \\ &\vdash_{i} \neg A \to A \to B \end{align*}
\emph{Case} $\land$.
\begin{align*} &\vdash A \to B \to A \land B \\ &\vdash \neg A \to \neg(A \land B) \\ &\vdash \neg B \to \neg(A \land B) \end{align*}
Notice that these formulas are slightly stronger than required. \end{proof}
\begin{lemma*} Let $A$ be a formula in propositional logic, and let $P_1, \dots, P_n$ be all propositional variables in $A$. Then we have, for every valuation $v$,
\[ \vdash_i P_1^{v(P_1)} \to \dots \to P_n^{v(P_n)} \to A^{v(A)}. \]
\end{lemma*}
\begin{proof} By induction on $A$. \emph{Case} $P_i$. Clear. \emph{Case} $A * B$. Use
\[ \vdash_{i} A^{v(A)} \to B^{v(B)} \to (A * B)^{v(A) * v(B)}, \]
which holds by the previous lemma. \end{proof}
\subsection{Glivenko's Theorem} \mylabel{SS:Glivenko}
\begin{theorem*}[Glivenko] $\vdash_i \neg A$ for $\neg A$ a tautology. \end{theorem*}
\begin{proof} By the final lemma in \ref{SS:val} we have, for every valuation $v$,
\[ \vdash_i P_1^{v(P_1)} \to \dots \to P_n^{v(P_n)} \to \neg A, \]
since by assumption $v(\neg A)=1$. We show
\[ \vdash_i P_1^{v(P_1)} \to \dots \to P_k^{v(P_k)} \to \neg A \quad \hbox{for every valuation $v$,} \]
by induction on $i := n-k$. \emph{Base} $i=0$. Clear, by what we just noted. \emph{Step}. We must show
\[ \vdash_i P_1^{v(P_1)} \to \dots \to P_{k-1}^{v(P_{k-1})} \to \neg A.
\] By IH we have \begin{align*} &\vdash_i P_1^{v(P_1)} \to \dots \to P_{k-1}^{v(P_{k-1})} \to P_k \to \neg A, \\ &\vdash_i P_1^{v(P_1)} \to \dots \to P_{k-1}^{v(P_{k-1})} \to \neg P_k \to \neg A. \end{align*} Using the lemma in \ref{SS:Casesneg} we obtain \[ \vdash_i P_1^{v(P_1)} \to \dots \to P_{k-1}^{v(P_{k-1})} \to \neg \neg \neg A. \] The claim follows from $\vdash \neg \neg \neg A \to \neg A$. -- Applying the inductively proven claim with $i=n$ (i.e., $k=0$) yields $\vdash_i \neg A$, as required. \end{proof} \section{Negative Translation} \mylabel{S:NegTrans} Having defined classical and intuitionistic logic out of minimal logic by adding axioms, we next show that in fact, both logics can be embedded into minimal logic, as long as we restrict ourselves to the language based on $\{{\to}, {\land}, {\forall}, {\falsum}\}$. This restriction will be in force for the rest of the present chapter. \subsection{Negative Formulas} \mylabel{SS:NegFlas} A formula $A$ of the $\{{\to}, {\land}, {\forall}, {\falsum}\}$-language is called \emph{negative}\index{formula!negative}, if every atomic formula of $A$ different from $\falsum$ occurs negated. \begin{lemma*} For negative $A$ we have $\vdash\neg \neg A \to A$. \end{lemma*} \begin{proof} Use the Stability Lemma and $\vdash\neg \neg\neg R \vec{t} \to\neg R \vec{t}$. \end{proof} We now define the G\"odel-Gentzen (or \inquotes{negative}) translation of classical logic into minimal logic. The basic idea is to double negate every atomic formula. \begin{definition*} [Negative translation $^g$ of G\"odel-Gentzen\index{Goedel-Gentzen-translation@G\"odel-Gentzen translation $^g$}] \begin{alignat*}{2} &\falsum^g &&:= \falsum, \\ &R \vec{t}^g &&:= \neg \neg R \vec{t}, \\ &(A \land B)^g &&:= A^g \land B^g, \\ &(A \to B)^g &&:= A^g \to B^g, \\ &(\forall x A)^g &&:= \forall x A^g. \end{alignat*} \end{definition*} Notice that $A^g$ is the same as $A$ for negative $A$. \begin{theorem*} For all formulas $A$ we have \begin{enumeratea} \item \label{T:negtrans:a} $\vdash_c A \leftrightarrow A^g$, \item \label{T:negtrans:b} $\Gamma \vdash_c A$ iff $\Gamma^g \vdash A^g$, where $\Gamma^g := \set{B^g}{B \in \Gamma}$. \end{enumeratea} \end{theorem*} \begin{proof} (a). The claim follows easily from the Equivalence Lemma.. (b) $\Leftarrow$. Obvious. $\Rightarrow$. By induction on the classical derivation. For a stability assumption $\neg \neg R \vec{t} \to R \vec{t}$ we have $(\neg \neg R \vec{t} \to R \vec{t})^g = \neg \neg R \vec{t}\to\neg \neg R \vec{t}$, and this is easily derivable. \emph{Case} $\impI$. Assume \[ \AxiomC{$[u \colon A]$} \noLine \UnaryInfC{$\C{D}$} \noLine \UnaryInfC{$B$} \RightLabel{$\impI u$} \UnaryInfC{$A\to B$} \DisplayProof \] We have by induction hypothesis \[ \AxiomC{$[u \colon A^g]$} \noLine \UnaryInfC{$\C{D}^g$} \noLine \UnaryInfC{$B^g$} \RightLabel{$\impI u$} \UnaryInfC{$A^g\to B^g$} \DisplayProof \] \emph{Case} $\impE$. Assume \[ \AxiomC{$\C{D}_0$} \noLine \UnaryInfC{$A \to B$} \AxiomC{$\C{D}_1$} \noLine \UnaryInfC{$A$} \BinaryInfC{$B$} \DisplayProof \] We have by induction hypothesis \[ \AxiomC{$\C{D}_0^g$} \noLine \UnaryInfC{$A^g\to B^g$} \AxiomC{$\C{D}_1^g$} \noLine \UnaryInfC{$A^g$} \BinaryInfC{$B^g$} \DisplayProof \] The other cases are treated similarly. \end{proof} \begin{corollary*}[Embedding of classical logic] For negative $A$, \[ \vdash_c A\iff \vdash A. \] \end{corollary*} \begin{proof} By the theorem we have $\vdash_c A$ iff $\vdash A^g$. 
Since $A$ is negative, every atom distinct from $\falsum$ in $A$ must occur negated, as $\neg R \vec{t}$, and hence in $A^g$ it appears in same form. \end{proof} Since every formula is classically equivalent to a negative formula, we have achieved an embedding of classical logic into minimal logic. Note that $\not\vdash\neg \neg P\to P$. % (as we shall show in section \ref{Compl}). The corollary therefore does not hold for all formulas $A$. \subsection{Formulas Implying Their Negative Translation} \mylabel{SS:Spreading} We introduce a further observation (due to Leivant; see Troelstra and van Dalen \cite[Ch.2, Sec.3]{TroelstravanDalen88}) which will be useful for program extraction from classical proofs (cf.\ Chapter~\ref{C:Class}). There it will be necessary to actually transform a given classical derivation $\vdash_c A$ into a minimal logic derivation $\vdash A^g$. In particular, for every assumption constant $C$ used in the given derivation we have to provide a derivation of $C^g$. Now for some formulas $S$ -- the so-called \emph{spreading} formulas -- this is immediate, for we can derive $S \to S^g$, and hence can use the original assumption constant. Recall that our formulas may contain \emph{predicate variables}\index{predicate variable} denoted by $X$, which are place holders for comprehension terms, i.e.\ formulas with distinguished variables. We use the obvious notation $\subst{A}{X}{\set{\vec{x}}{B}}$ or shortly $A[\set{\vec{x}}{B}]$ or even $A[B]$ for substitution for predicate variables. Clearly the G\"odel-Gentzen translation of $X \vec{t}$ is $\neg \neg X \vec{t}$. Recall also that an assumption constant consists of an uninstantiated formula (e.g.\ $X 0 \to (\forall n. X n \to X(n+1)) \to \forall n X n$ for induction) together with a substitution of comprehension terms for predicate variables (e.g.\ $X \mapsto \set{n}{n < n+1}$). Then in order to immediately obtain a derivation of $C^g$ for $C$ an assumption constant it suffices to know that its \emph{uninstantiated} formula $S$ is spreading, for then we generally have $\vdash S[\vec{A}^g] \to S[\vec{A}]^g$ (see the theorem below) and hence can use the same assumption constant with a different substitution. We define \emph{spreading} formulas\index{formula!spreading} $S$, \emph{wiping} formulas\index{formula!wiping} $W$ and \emph{isolating} formulas\index{formula!isolating} $I$ inductively. \begin{alignat*}{2} S \;\;&:=\;\; \falsum \mid R \vec{t} \mid X \vec{t} \mid S \land S \mid I \to S \mid \forall x S, \\ W \;\;&:=\;\; \falsum \mid X \vec{t} \mid W \land W \mid S \to W \mid \forall x W, \\ I \;\;&:=\;\; R \vec{t} \mid W \mid I \land I. \end{alignat*} Let $\C{S}$ ($\C{W}, \C{I}$) be the class of spreading (wiping, isolating) formulas. \begin{theorem*} \begin{alignat*}{2} &\vdash S[\vec{A}^g] \to S[\vec{A}]^g &\quad& \text{for every spreading formula $S$,} \\ &\vdash W[\vec{A}]^g \to W[\vec{A}^g] &&\text{for every wiping formula $W$,} \\ &\vdash I[\vec{A}]^g \to \neg \neg I[\vec{A}^g] &&\text{for every isolating formula $I$.} \end{alignat*} We assume here that all occurrences of predicate variables are substituted. \end{theorem*} \begin{proof} For brevity we write $S^g$ for $S[\vec{A}]^g$ and $S$ for $S[\vec{A}^g]$, and similarly for $W$ and $I$. \emph{Case} $\falsum \in \C{S}$. We must show $\vdash \falsum \to \falsum^g$. Take $\lambda u^{\falsum} u$. \emph{Case} $R \vec{t} \in \C{S}$. We must show $\vdash R \vec{t} \to \neg \neg R \vec{t}$. Take $\lambda u^{R \vec{t}} \lambda v^{\neg R \vec{t}}.vu$. 
\emph{Case} $X \vec{t} \in \C{S}$, with $X$ substituted by $\set{\vec{x}}{A}$. We must show $\vdash A^g[\vec{t}] \to A^g[\vec{t}]$, which is trivial. \emph{Case} $S_1 \land S_2 \in \C{S}$. We must show $\vdash S_1 \land S_2 \to S_1^g \land S_2^g$. Take \[ \AxiomC{IH} \noLine \UnaryInfC{$S_1 \to S_1^g$} \AxiomC{$u \colon S_1 \land S_2$} \UnaryInfC{$S_1$} \BinaryInfC{$S_1^g$} \AxiomC{IH} \noLine \UnaryInfC{$S_2 \to S_2^g$} \AxiomC{$u \colon S_1 \land S_2$} \UnaryInfC{$S_2$} \BinaryInfC{$S_2^g$} \BinaryInfC{$S_1^g \land S_2^g$} \DisplayProof \] \emph{Case} $I \to S \in \C{S}$. We must show $\vdash (I \to S) \to I^g \to S^g$. Recall that $\vdash \neg \neg S^g \to S^g$ by the Stability Lemma, because $S^g$ is negative. Take \[ \AxiomC{Stab} \noLine \UnaryInfC{$\neg \neg S^g \to S^g$} \AxiomC{IH} \noLine \UnaryInfC{$I^g \to \neg \neg I$} \AxiomC{$v \colon I^g$} \insertBetweenHyps{\hspace{.5em}} \BinaryInfC{$\neg \neg I$} \AxiomC{$w_1 \colon \neg S^g$} \AxiomC{IH} \noLine \UnaryInfC{$S \to S^g$} \AxiomC{$u \colon I \to S$} \AxiomC{$w_2 \colon I$} \insertBetweenHyps{\hspace{.5em}} \BinaryInfC{$S$} \BinaryInfC{$S^g$} \insertBetweenHyps{\hspace{.5em}} \BinaryInfC{$\falsum$} \RightLabel{$\impI w_2$} \UnaryInfC{$\neg I$} \insertBetweenHyps{\hspace{.5em}} \BinaryInfC{$\falsum$} \RightLabel{$\impI w_1$} \UnaryInfC{$\neg \neg S^g$} \insertBetweenHyps{\hspace{-2em}} \BinaryInfC{$S^g$} \DisplayProof \] \emph{Case} $\forall x S \in \C{S}$. We must show $\vdash \forall x S \to \forall x S^g$. Take \[ \AxiomC{IH} \noLine \UnaryInfC{$S \to S^g$} \AxiomC{$u \colon \forall x S$} \AxiomC{$x$} \BinaryInfC{$S$} \BinaryInfC{$S^g$} \DisplayProof \] \emph{Case} $\falsum \in \C{W}$. We must show $\vdash \falsum^g \to \falsum$. Take $\lambda u^{\falsum} u$. \emph{Case} $X \vec{t} \in \C{W}$, with $X$ substituted by $\set{\vec{x}}{A}$. We must show $\vdash A^g[\vec{t}] \to A^g[\vec{t}]$, which is trivial. \emph{Case} $W_1 \land W_2 \in \C{W}$. We must show $\vdash W_1^g \land W_2^g \to W_1 \land W_2$. Take \[ \AxiomC{IH} \noLine \UnaryInfC{$W_1^g \to W_1$} \AxiomC{$u \colon W_1^g \land W_2^g$} \UnaryInfC{$W_1^g$} \BinaryInfC{$W_1$} \AxiomC{IH} \noLine \UnaryInfC{$W_2^g \to W_2$} \AxiomC{$u \colon W_1^g \land W_2^g$} \UnaryInfC{$W_2^g$} \BinaryInfC{$W_2$} \BinaryInfC{$W_1 \land W_2$} \DisplayProof \] \emph{Case} $S \to W \in \C{W}$. We must show $\vdash (S^g \to W^g) \to S \to W$. Take \[ \AxiomC{IH} \noLine \UnaryInfC{$W^g \to W$} \AxiomC{$u \colon S^g \to W^g$} \AxiomC{IH} \noLine \UnaryInfC{$S \to S^g$} \AxiomC{$v \colon S$} \BinaryInfC{$S^g$} \BinaryInfC{$W^g$} \BinaryInfC{$W$} \DisplayProof \] \emph{Case} $\forall x W \in \C{W}$. We must show $\vdash \forall x W^g \to \forall x W$. Take \[ \AxiomC{IH} \noLine \UnaryInfC{$W^g \to W$} \AxiomC{$u \colon \forall x W^g$} \AxiomC{$x$} \BinaryInfC{$W^g$} \BinaryInfC{$W$} \DisplayProof \] \emph{Case} $R \vec{t} \in \C{I}$. We must show $\vdash \neg \neg R \vec{t} \to \neg \neg R \vec{t}$, which is trivial. \emph{Case} $W \in \C{I}$. We must show $\vdash W^g \to \neg \neg W$, which trivially follows from the IH $\vdash W^g \to W$. Take \[ \AxiomC{$v \colon \neg W$} \AxiomC{IH} \noLine \UnaryInfC{$W^g \to W$} \AxiomC{$u \colon W^g$} \BinaryInfC{$W$} \BinaryInfC{$\falsum$} \DisplayProof \] \emph{Case} $I_1 \land I_2 \in \C{I}$. We must show $\vdash I_1^g \land I_2^g \to \neg \neg (I_1 \land I_2)$. 
Take \[ \AxiomC{IH} \noLine \UnaryInfC{$I_2^g \to \neg \neg I_2$} \AxiomC{$I_1^g {\land} I_2^g$} \UnaryInfC{$I_2^g$} \BinaryInfC{$\neg \neg I_2$} \AxiomC{IH} \noLine \UnaryInfC{$I_1^g \to \neg \neg I_1$} \AxiomC{$I_1^g {\land} I_2^g$} \UnaryInfC{$I_1^g$} \BinaryInfC{$\neg \neg I_1$} \AxiomC{$\neg (I_1 \land I_2)$} \AxiomC{$I_1$} \AxiomC{$I_2$} \insertBetweenHyps{\hspace{.5em}} \BinaryInfC{$I_1 \land I_2$} \BinaryInfC{$\falsum$} % \RightLabel{$\impI w_1$} \UnaryInfC{$\neg I_1$} \BinaryInfC{$\falsum$} % \RightLabel{$\impI w_2$} \UnaryInfC{$\neg I_2$} \BinaryInfC{$\falsum$} \DisplayProof \] This completes the proof. \end{proof} Notice that the G\"odel-Gentzen translation double negates every atom, and hence may produce triple negations. However, because of $\vdash \neg \neg \neg A \leftrightarrow \neg A$ and the Equivalence Lemma we can systematically replace triple negations by single negations. As a guide for the implementation, we carry out some of the details here. Let $A^*$ (the \emph{reduced form} of $A$) be obtained from $A$ by replacing $\neg \neg \neg A$ by $\neg A$ whenever possible. So \begin{alignat*}{2} &\falsum^* &&:= \falsum, \\ &R \vec{t}^* &&:= R \vec{t}, \\ &(A \land B)^* &&:= A^* \land B^*, \\ &(A \to B)^* &&:= \begin{cases} (\neg C)^* &\hbox{if $A \to B = \neg \neg \neg C$}\\ A^* \to B^*, &\hbox{otherwise}\end{cases} \\ &(\forall x A)^* &&:= \forall x A^*. \end{alignat*} We simultaneously construct derivations of $A \to A^*$ and $A^* \to A$. \begin{lemma*} \begin{enumeratea} \item $\vdash A \to A^*$, \item $\vdash A^* \to A$. \end{enumeratea} \end{lemma*} \begin{proof} \emph{Case} $A$ prime formula. Then $A^*$ is $A$, and we can take $\lambda u^A u$. \emph{Case} $\forall x A$. (a). We must show $\vdash \forall x A \to \forall x A^*$. Take \[ \AxiomC{IH(a)} \noLine \UnaryInfC{$A \to A^*$} \AxiomC{$u \colon \forall x A$} \AxiomC{$x$} \BinaryInfC{$A$} \BinaryInfC{$A^*$} \DisplayProof \] (b). We must show $\vdash \forall x A^* \to \forall x A$. Take \[ \AxiomC{IH(b)} \noLine \UnaryInfC{$A^* \to A$} \AxiomC{$u \colon \forall x A^*$} \AxiomC{$x$} \BinaryInfC{$A^*$} \BinaryInfC{$A$} \DisplayProof \] \emph{Case} $\neg \neg \neg A$. (a). We must show $\vdash \neg \neg \neg A \to (\neg A)^*$. \[ \AxiomC{IH(a)} \noLine \UnaryInfC{$\neg A \to (\neg A)^*$} \AxiomC{$u \colon \neg \neg \neg A$} \AxiomC{$v \colon \neg A$} \AxiomC{$w \colon A$} \BinaryInfC{$\falsum$} \RightLabel{$\impI v$} \UnaryInfC{$\neg \neg A$} \BinaryInfC{$\falsum$} \RightLabel{$\impI w$} \UnaryInfC{$\neg A$} \BinaryInfC{$(\neg A)^*$} \DisplayProof \] (b). We must show $\vdash (\neg A)^* \to \neg \neg \neg A$. \[ \AxiomC{$v \colon \neg \neg A$} \AxiomC{IH(b)} \noLine \UnaryInfC{$(\neg A)^* \to \neg A$} \AxiomC{$u \colon (\neg A)^*$} \BinaryInfC{$\neg A$} \BinaryInfC{$\falsum$} \DisplayProof \] \emph{Case} $A \to B$ not of the form $\neg \neg \neg C$. (a). We must show $\vdash (A \to B) \to A^* \to B^*$. \[ \AxiomC{IH(a)} \noLine \UnaryInfC{$B \to B^*$} \AxiomC{$u \colon A \to B$} \AxiomC{IH(b)} \noLine \UnaryInfC{$A^* \to A$} \AxiomC{$v \colon A^*$} \BinaryInfC{$A$} \BinaryInfC{$B$} \BinaryInfC{$B^*$} \DisplayProof \] (b). We must show $\vdash (A^* \to B^*) \to A \to B$. \[ \AxiomC{IH(b)} \noLine \UnaryInfC{$B^* \to B$} \AxiomC{$u \colon A^* \to B^*$} \AxiomC{IH(a)} \noLine \UnaryInfC{$A \to A^*$} \AxiomC{$v \colon A$} \BinaryInfC{$A^*$} \BinaryInfC{$B^*$} \BinaryInfC{$B$} \DisplayProof \] \end{proof} \section{Notes} \mylabel{S:LogicNotes} The proof of Glivenko's theorem is taken form Mints' book \cite{Mints00}. 
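For readers who want to experiment with the translations of this chapter, the following sketch shows one possible functional rendering of the G\"odel-Gentzen translation $^g$ and of the reduced form $^*$, for the $\{{\to}, {\land}, {\forall}, {\falsum}\}$-language. It is written in Haskell; the datatype \texttt{Formula} and all identifiers are ad hoc and are not part of the \textsc{Minlog} implementation.
\begin{verbatim}
-- A minimal sketch (independent of Minlog): the Goedel-Gentzen
-- translation (^g) and the reduced form (^*) on a toy formula type.
data Formula
  = Falsum
  | Atom String            -- atomic formula R t1 ... tn (arguments suppressed)
  | Imp Formula Formula    -- A -> B
  | And Formula Formula    -- A /\ B
  | All String Formula     -- forall x A
  deriving (Eq, Show)

negF :: Formula -> Formula
negF a = Imp a Falsum      -- ~A abbreviates A -> falsum

-- Goedel-Gentzen translation: double negate every atom distinct from falsum.
gg :: Formula -> Formula
gg Falsum    = Falsum
gg (Atom r)  = negF (negF (Atom r))
gg (And a b) = And (gg a) (gg b)
gg (Imp a b) = Imp (gg a) (gg b)
gg (All x a) = All x (gg a)

-- Reduced form A*: replace triple negations by single negations.
reduce :: Formula -> Formula
reduce Falsum    = Falsum
reduce (Atom r)  = Atom r
reduce (And a b) = And (reduce a) (reduce b)
reduce f@(Imp a b) =
  case tripleNeg f of
    Just c  -> reduce (negF c)           -- (~~~C)* := (~C)*
    Nothing -> Imp (reduce a) (reduce b)
reduce (All x a) = All x (reduce a)

-- Recognize ~~~C, written out as ((C -> falsum) -> falsum) -> falsum.
tripleNeg :: Formula -> Maybe Formula
tripleNeg (Imp (Imp (Imp c Falsum) Falsum) Falsum) = Just c
tripleNeg _ = Nothing
\end{verbatim}
Composing the two functions, \texttt{reduce (gg a)}, gives the variant of the negative translation in which all triple negations have been replaced by single negations, as justified by the preceding lemma together with the Equivalence Lemma.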
% \include{alg} % $Id: mlcf.tex,v 1.30 2008/01/25 13:30:18 logik Exp $ \chapter{Algebras}\mylabel{C:Alg} A free algebra is given by \emph{constructors}, for instance zero and successor for the natural numbers. We want to treat other data types as well, like lists and binary trees. When dealing with inductively defined sets, it will also be useful to explicitely refer to the generation tree. Such trees are quite often infinitely branching, and hence we allow infinitary free algebras from the outset. The freeness of the constructors is expressed by requiring that their ranges are disjoint and that they are injective. Moreover, we view the free algebra as a domain and require that its bottom element is not in the range of the constructors. Hence the constructors are total and non-strict. For the notion of totality cf.\ \cite[Chapter 8.3]{Stoltenberg94}. To make a free algebra into a domain and still have the constructors injective and with disjoint ranges, we model e.g.\ the natural numbers as shown in Figure~\ref{F:nat}. \begin{figure} % \begin{picture}(168,108) \begin{picture}(170,120) \put(48,0){\makebox(0,0){$\bullet$}} \put(36,0){\makebox(0,0){$\bottom$}} \put(48,0){\line(-1,1){24}} \put(24,24){\makebox(0,0){$\bullet$}} \put(12,24){\makebox(0,0){$0$}} \put(48,0){\line(1,1){24}} \put(72,24){\makebox(0,0){$\bullet$}} \put(90,24){\makebox(0,0){$S \bottom$}} \put(72,24){\line(-1,1){24}} \put(48,48){\makebox(0,0){$\bullet$}} \put(30,48){\makebox(0,0){$S 0$}} \put(72,24){\line(1,1){24}} \put(96,48){\makebox(0,0){$\bullet$}} \put(120,48){\makebox(0,0){$S(S \bottom)$}} \put(96,48){\line(-1,1){24}} \put(72,72){\makebox(0,0){$\bullet$}} \put(48,72){\makebox(0,0){$S(S 0)$}} \put(96,48){\line(1,1){24}} \put(120,72){\makebox(0,0){$\bullet$}} \put(150,72){\makebox(0,0){$S(S(S \bottom))$}} \put(120,72){\line(-1,1){24}} \put(96,96){\makebox(0,0){$\bullet$}} \put(66,96){\makebox(0,0){$S(S(S 0))$}} \put(120,72){\line(1,1){24}} \put(147,99){\makebox(0,0){.}} \put(150,102){\makebox(0,0){.}} \put(153,105){\makebox(0,0){.}} \put(159,111){\makebox(0,0){$\bullet$}} \put(181,111){\makebox(0,0){$\infty$}} \end{picture} \caption{The domain of natural numbers} \label{F:nat} \end{figure} Notice that for more complex algebras we usually need many more \inquotes{infinite} elements; this is a consequence of the closure of domains under suprema. To make dealing with such complex structures less annoying, we will normally restrict attention to the \emph{total} elements of a domain, in this case -- as expected -- the elements labelled $0$, $S 0$, $S(S 0)$ etc. \section{Examples of Finitary and Infinitary Algebras} \mylabel{S:ExAlg} We shall consider some examples of free algebras, generated from constructors. \begin{enumerate} \item The easiest example is the algebra $\typeUnit$\index{unit}, which has just one nullary constructor called $\termUnit$\index{Dummy}. It consists of exactly one element. \item Almost as easy is the next example, the algebra $\typeBool$\index{boole}, which has just two nullary constructors called $\true$ and $\false$ (sometimes we use \texttt{True}\index{True} and \texttt{False}\index{False} instead). It consists of exactly two elements. \item The next example is the simplest algebra with infinitely many elements, the algebra $\typeN$\index{nat} of natural numbers. Its constructors are a nullary constructor $0$ (also written \texttt{Zero}) and a unary constructor $\suc$ (also written \texttt{Succ}). \item We shall also allow parametrized algebras, depending on type parameters. 
The simplest example is the algebra $\typeL{\rho}$ (sometimes we use (\texttt{list $\rho$})\index{list} instead) of lists of objects of a given type $\rho$. Its constructors are a nullary constructor $\nil$\index{Nil} and a binary constructor $\cons$\index{Cons}; the latter takes an object $x$ of type $\rho$ and a list $l$ and constructs out of these the new list obtained by putting $x$ to the front of $l$. \item Another important parametrized algebra is the disjoint union of the algebras given by types $\rho_1$ and $\rho_2$. It is called ($\rho_1$ \texttt{yplus} $\rho_2$)\index{yplus} (\inquotes{y} from t\emph{y}pe), and is given by two unary constructors $\termSumIntroLeft$\index{Inl} and $\termSumIntroRight$\index{Inr} of types $\rho_1 \to \rho_1 + \rho_2$ and $\rho_2 \to \rho_1 + \rho_2$, respectively. \item We also admit simultaneously generated free algebras. An example is provided by the obvious way to simultaneously generate trees and finite lists (of arbitrary length) of trees. The algebra $\tree$\index{tree} has one nullary constructor $\leaf$\index{Leaf} (taking a natural number as parameter) and one unary constructor $\branch$\index{Branch}, building a tree from a tree list. The algebra $\tlist$\index{tlist} has one nullary constructor $\emp$\index{Empty} and one binary constructor $\tcons$\index{Tcons}; the latter takes an object $t$ of type $\tree$ and a tree list $l$ and constructs out of these the new tree list obtained by putting $t$ to the front of $l$. \end{enumerate} All these algebras are \emph{finitary}\index{algebra!finitary}, in the sense that every constructor takes only finitely many arguments. Then for any two elements of the algebra we can decide whether they are equal. When dealing with inductively defined sets, it will also be useful to explicitely refer to the generation tree. Such trees are quite often countably branching, and hence we also allow \emph{infinitary}\index{algebra!infinitary} free algebras from the outset, whose constructors may take infinitely many arguments. Notice that then equality is \emph{not} decidable any more, and hence needs to be axiomatized. For an example of an infinitary algebra, consider the countable ordinals. They can be seen as generated from a nullary constructor $0$, a unary constructor for the successor and a constructor building the supremum out of a countably infinite list of ordinals (given by a function from the natural numbers to ordinals). Clearly the generation process of the elements of a free algebra allows \emph{recursive}\index{definition!recursive} definitions of functions on such algebras. For the examples above, they have (when no parameters are present) the following form. \begin{enumerate} \item For the algebra $\typeUnit$ we may just define \[ f(\termUnit) = a \] \item Similarly for the algebra $\typeBool$ we can explicitely define \begin{align*} f(\true) &= a, \\ f(\false) &= b. \end{align*} \item For the algebra $\typeN$ of natural numbers we obtain the familiar recursion scheme \begin{align*} f(0) &= a, \\ f(\suc(n)) &= h(n, f(n)). \end{align*} \item For the parametrized algebra $\typeL{\rho}$ we have a very similar recursion scheme: \begin{align*} f(\nil) &= a, \\ f(\cons(x,l)) &= h(x,l,f(l)). \end{align*} \item For the disjoint union of given types $\rho_1$ and $\rho_2$, the algebra does not require recursive calls, and hence the recursion scheme is simply \begin{align*} f(\termSumIntroLeft(x)) &= g(x), \\ f(\termSumIntroRight(y)) &= h(y). 
\end{align*} \item Somewhat more interesting are simultaneously generated free algebras. Here we may define \begin{align*} f(\leaf(n)) &= h_1(n), \\ f(\branch(l)) &= h_2(l,g(l)), \\ g(\emp) &= b, \\ g(\tcons(x,l)) &= h_3(x,l,f(x),g(l)). \end{align*} \end{enumerate} \section{Recursion, Strong Normalization}\mylabel{S:Rec} % Based on Normalization for simultaneous free algebras % (by Holger Benl, Ulrich Berger and Helmut Schwichtenberg) % in papers/temp00/n.tex We give a predicative proof of strong normalization for terms with recursion operators in a system of simultaneously defined free algebras. The proof uses a variant of the Tait's\index{Tait} strong computability predicates. It is well known that in a system of simultaneously defined free algebras every term (possibly involving recursion operators) is strongly normalizing. However, the standard proof reduces the problem to strong normalization of second order propositional logic (called system $F$ by Girard \cite{Girard71}). This latter result requires a method not formalizable in analysis. Here we give a much simpler proof, which only uses predicative methods. \subsection{Types} \mylabel{SS:Types} Our type system is defined by three type forming operations: arrow types\index{arrow types} $\rho \to \sigma$, pair types\index{pair types} $\rho \typeProd \sigma$ and the formation of inductively generated types $\mu \vec{\alpha} \,\vec{\kappa}$, where $\vec{\alpha} = (\alpha_j)_{j=1,\dots,N}$ is a list of distinct \inquotes{type variables}, and $\vec{\kappa} = (\kappa_i)_{i=1,\dots,k}$ is a list of \inquotes{constructor types}, whose argument types contain $\alpha_1,\dots,\alpha_N$ in strictly positive positions only. For instance, $\mu \alpha (\alpha, \alpha \to \alpha)$ is the type of natural numbers; here the list $(\alpha, \alpha \to \alpha)$ stands for two generation principles: $\alpha$ for \inquotes{there is a natural number} (the $0$), and $\alpha \to \alpha$ for \inquotes{for every natural number there is another one} (its successor). Let an infinite supply of \emph{type variables} $\alpha, \beta$ be given. \begin{definition*} Let $\vec{\alpha} = (\alpha_j)_{j=1,\dots,N}$ be a list of distinct type variables. \emph{Types} $\rho, \sigma, \tau, \mu \in \types$ and \emph{constructor types} $\kappa \in \constrtypes(\vec{\alpha})$ are defined inductively as follows. \begin{align*} &\frac{\vec{\rho}, \vec{\sigma}_1, \dots, \vec{\sigma}_n \in \types} {\vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha})} \quad\hbox{($n \ge 0$)} \\ &\frac{\kappa_1, \dots, \kappa_n \in \constrtypes(\vec{\alpha})} {(\mu \vec{\alpha} \,(\kappa_1, \dots, \kappa_n))_j \in \types} \quad\hbox{($n \ge 1$)} \qquad \frac{\rho, \sigma \in \types}{\rho \to \sigma \in \types} \qquad \frac{\rho, \sigma \in \types}{\rho \typeProd \sigma \in \types} \end{align*} \end{definition*} Here $\vec{\rho}$ is short for a list $\rho_1,\dots,\rho_m$ ($m\ge 0$) of types and $\vec{\rho} \to \sigma$ means $\rho_1 \to \dots \to \rho_m \to \sigma$, associated to the right. We shall use $\mu$ for types of the form $(\mu \vec{\alpha} \,(\kappa_1, \dots, \kappa_k))_j$ only, and for types $\vec{\tau}$ and a constructor type $\kappa = \vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha})$ let \[ \kappa[\vec{\tau}] := \vec{\rho} \to (\vec{\sigma}_1 \to \tau_{j_1}) \to \dots \to (\vec{\sigma}_n \to \tau_{j_n}) \to \tau_j. 
\] \begin{examples*} \begin{alignat*}{2} &\typeUnit &&:= \mu \alpha\,\alpha, \\ &\typeBool &&:= \mu \alpha\,(\alpha,\alpha), \\ &\typeN &&:= \mu \alpha\,(\alpha,\alpha \to \alpha), \\ &\typeL{\rho} &&:= \mu \alpha\,(\alpha,\rho \to \alpha \to \alpha), \\ &\rho_1 + \rho_2 &&:= \mu \alpha\,(\rho_1 \to \alpha, \rho_2 \to \alpha), \\ &(\tree, \tlist) &&:= \mu (\alpha, \beta)\, (\typeN \to \alpha, \beta \to \alpha, \beta, \alpha \to \beta \to \beta), \\ &\typeBin &&:= \mu \alpha\,(\alpha, \alpha \to \alpha \to \alpha), \\ &\C{O} &&:= \mu \alpha\, (\alpha, \alpha \to \alpha, (\typeN \to \alpha) \to \alpha), \\ &\C{T}_0 &&:= \typeN, \\ &\C{T}_{n+1} &&:= \mu \alpha\,(\alpha, (\C{T}_n \to \alpha) \to \alpha). \end{alignat*} Notice that there are many equivalent ways to define these types. For instance, we could take $\typeUnit \typeSum \typeUnit$ to be the type of booleans, and $\typeL{\typeUnit}$ to be the type of natural numbers. Notice also that we have added the pair type $\rho \typeProd \sigma$ for simplicity only. Products could have been defined in two forms, as tensor products and as cartesian products, by \begin{alignat*}{2} &\rho_1 \typeTensor \rho_2 &&:= \mu \alpha. \rho_1 \to \rho_2 \to \alpha, \\ &\rho_1 \times_\sigma \rho_2 &&:= \mu \alpha. (\sigma \to \rho_1) \to (\sigma \to \rho_2) \to \sigma \to \alpha, \end{alignat*} If we would allow ourselves to quantify over types, the cartesian product could be defined as \[ \rho_1 \times \rho_2 := \mu \alpha \forall \sigma. (\sigma \to \rho_1) \to (\sigma \to \rho_2) \to \sigma \to \alpha. \] \end{examples*} \subsection{Terms} \mylabel{SS:Terms} The inductive structure of the types $\vec{\mu} = \mu \vec{\alpha}\,\vec{\kappa}$ corresponds to two sorts of constants. With the \emph{constructors} $\constr_i^{\vec{\mu}} \colon \kappa_i[\vec{\mu}]$ we can construct elements of a type $\mu_j$, and with the \emph{recursion operators} $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$ we can construct mappings from $\mu_j$ to $\tau_j$ by recursion on the structure of $\vec{\mu}$. In order to define the type of the recursion operators w.r.t.\ $\vec{\mu} = \mu\vec{\alpha} \, \vec{\kappa}$ and result types $\vec{\tau}$, we first define for \[ \kappa_i = \vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha}) \] the \emph{step type} \begin{align*} \ST_i^{\vec{\mu}, \vec{\tau}} := \vec{\rho} \to {} &(\vec{\sigma}_1 \to \mu_{j_1}) \to \dots \to (\vec{\sigma}_n \to \mu_{j_n}) \to \\ &(\vec{\sigma}_1 \to \tau_{j_1}) \to \dots \to (\vec{\sigma}_n \to \tau_{j_n}) \to \tau_j. \end{align*} Here $\vec{\rho}, (\vec{\sigma}_1 \to \mu_{j_1}), \dots, (\vec{\sigma}_n \to \mu_{j_n})$ correspond to the \emph{components} % (or \emph{parameters}) of the object of type $\mu_j$ under consideration, and $(\vec{\sigma}_1 \to \tau_{j_1}), \dots, (\vec{\sigma}_n \to \tau_{j_n})$ to the previously defined values. The recursion operator $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$ has type \[ \rec_{\mu_j}^{\vec{\mu}, \vec{\tau}} \colon \ST_1^{\vec{\mu}, \vec{\tau}} \to \dots \to \ST_k^{\vec{\mu}, \vec{\tau}} \to \mu_j \to \tau_j \] (recall that $k$ is the total number of constructors for all types $\mu_1, \dots, \mu_N$). We will often write $\rec_j^{\vec{\mu}, \vec{\tau}}$ for $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$, and omit the upper indices $\vec{\mu}, \vec{\tau}$ when they are clear from the context. 
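Readers familiar with typed functional programming may find it helpful to see the recursion operators in that guise. The following Haskell sketch (all names are ad hoc, and the domain-theoretic aspects, in particular the bottom element, are ignored) spells out $\rec$ for the algebras $\typeN$ and $\typeL{\alpha}$; each step function receives the components of the constructor and the previously computed values, in the order prescribed by the step types above.
\begin{verbatim}
-- Sketch only: total-value versions of the recursion operators
-- for nat and list; partiality is ignored.
data Nat = Zero | Succ Nat

-- rec_nat^tau : tau -> (nat -> tau -> tau) -> nat -> tau
recNat :: tau -> (Nat -> tau -> tau) -> Nat -> tau
recNat base _    Zero     = base
recNat base step (Succ n) = step n (recNat base step n)

-- rec_list^tau : tau -> (alpha -> list alpha -> tau -> tau) -> list alpha -> tau
recList :: tau -> (a -> [a] -> tau -> tau) -> [a] -> tau
recList base _    []      = base
recList base step (x : l) = step x l (recList base step l)

-- The append term of the examples below, with value type tau = [a] -> [a]:
append :: [a] -> [a] -> [a]
append = recList (\l2 -> l2) (\x _ p l2 -> x : p l2)

-- Simultaneous recursion via a pair value (cf. the Evenodd example below),
-- here with the first component computing evenness:
evenOdd :: Nat -> (Bool, Bool)
evenOdd = recNat (True, False) (\_ (e, o) -> (o, e))
\end{verbatim}
In the formal term calculus introduced below these operators are the constants $\rec_{\typeN}^\tau$ and $\rec_{\typeL{\alpha}}^\tau$.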
In case of a non-simultaneous free algebra, i.e.\ of type $\mu \alpha\,\kappa$, for $\rec_\mu^{\mu, \tau}$ we normally write $\rec_\mu^\tau$. \begin{definition*} \emph{Terms}\index{term} are inductively defined from typed variables and the constants $\constr_i^{\vec{\mu}}$ and $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$ by means of \begin{itemize} \item \indexentry{abstraction} $(\lambda x^\rho M^\sigma)^{\rho \to \sigma}$, \item \indexentry{application} $(M^{\rho \to \sigma}N^\rho)^\sigma$, \item \indexentry{pairing} $\pair{M^{\rho}}{N^{\sigma}}^{\rho \typeProd \sigma}$ and \item \indexentry{projections} $(M^{\rho \typeProd \sigma}0)^\rho$, $(M^{\rho \typeProd \sigma}1)^\sigma$. \end{itemize} \end{definition*} \begin{examples*} \begin{alignat*}{2} &\true^{\typeBool} := \constr_1^{\typeBool},\quad \false^{\typeBool} := \constr_2^{\typeBool}, \\ &\rec_{\typeBool}^\tau \colon \tau \to \tau \to \typeBool \to \tau, \\[6pt] &0^{\typeN} := \constr_1^{\typeN},\quad \suc^{\typeN \to \typeN} := \constr_2^{\typeN}, \\ &\rec_{\typeN}^\tau \colon \tau \to (\typeN \to \tau \to \tau) \to \typeN \to \tau, \\[6pt] &\nil^{\typeL{\alpha}} := \constr_1^{\typeL{\alpha}},\quad \cons^{\alpha \to \typeL{\alpha} \to \typeL{\alpha}} := \constr_2^{\typeL{\alpha}}, \\ &\rec_{\typeL{\alpha}}^\tau \colon \tau \to (\alpha \to \typeL{\alpha} \to \tau \to \tau) \to \typeL{\alpha} \to \tau, \\[6pt] &\bigl(\termSumIntroLeft_{{\rho_1}{\rho_2}} \bigr)^ {\rho_1 \to \rho_1 \typeSum \rho_2} := \constr_1^{\rho_1 \typeSum \rho_2}, \\ &\bigl(\termSumIntroRight_{{\rho_1}{\rho_2}} \bigr)^ {\rho_2 \to \rho_1 \typeSum \rho_2} := \constr_2^{\rho_1 \typeSum \rho_2}, \\ &\rec_{\rho_1 \typeSum \rho_2}^\tau \colon (\rho_1 \to \tau) \to (\rho_2 \to \tau) \to \rho_1 \typeSum \rho_2 \to \tau. \end{alignat*} \end{examples*} \begin{remark*} Notice that for the defined products the constructors and recursion operators are \begin{alignat*}{2} &\bigl(\termTensorIntro_{\rho_1 \rho_2} \bigr)^ {\rho_1 \to \rho_2 \to \rho_1 \typeTensor \rho_2} := \constr_1^{\rho_1 \typeTensor \rho_2}, \\ &\rec_{\rho_1 {\typeTensor} \rho_2}^{\tau} \colon (\rho_1 \to \rho_2 \to \tau) \to \rho_1 {\typeTensor} \rho_2 \to \tau, \\[6pt] &\bigl(\termProdIntro_{{\rho_1}{\rho_2}{\sigma}} \bigr)^ {(\sigma \to \rho_1) \to (\sigma \to \rho_2) \to \sigma \to \rho_1 \typeProd_\sigma \rho_2} := \constr_1^{\rho_1 \typeProd_\sigma \rho_2}, \\ &\rec_{\rho_1 \typeProd_\sigma \rho_2}^{\tau} \colon ((\sigma \to \rho_1) \to (\sigma \to \rho_2) \to \sigma \to \tau) \to \rho_1 \typeProd_\sigma \rho_2 \to \tau, \end{alignat*} \end{remark*} \begin{examples*} The \indexentry{append}-function $\listappend$ for lists is defined recursively by \begin{align*} \nil \listappend l_2 &:= l_2, \\ (\cons\, x\, l_1) \listappend l_2 &:= \cons\, x (l_1 \listappend l_2). \end{align*} It can be defined as the term \[ M_{\listappend} := \rec_{\typeL{\alpha}}^{\typeL{\alpha} \to \typeL{\alpha}} (\lambda l_2 l_2) (\lambda x \lambda l_1 \lambda p \lambda l_2. \cons\, x (p l_2). \] Using the append function $\listappend$ we can define \indexentry{list reversal} $\listrev$ by \begin{align*} \listrev\;\nil &:= \nil, \\ \listrev (\cons\, x\, l) &= (\listrev\; l) \listappend (\cons\, x \nil). \end{align*} It can be defined as the term \[ \rec_{\typeL{\alpha}}^{\typeL{\alpha}} \nil (\lambda x \lambda l \lambda p. M_{\listappend}\,p\,(\cons\, x \nil)). \] Assume we want to define by simultaneous recursion two functions on $\typeN$, say $\Even, \odd \colon \typeN \to \typeBool$. 
We want \begin{align*} \Even(0) &:= \true & \odd(0) &:= \false \\ \Even(\suc\, n) &:= \odd(n) & \odd(\suc\, n) &:= \Even(n) \end{align*} This can be achieved by using pair types: we recursively define the single function $\Evenodd \colon \typeN \to \typeBool \typeProd \typeBool$. The step types are \begin{align*} \ST_1 &= \typeBool \typeProd \typeBool, \\ \ST_2 &= \typeN \to \typeBool \typeProd \typeBool \to \typeBool \typeProd \typeBool, \end{align*} and we can define $\Evenodd := \rec_{\typeN}^{\typeBool \typeProd \typeBool} \pair{\false}{\true}(\lambda n \lambda p. \pair{p 1}{p 0})$. Our final example concerns the simultaneously defined free algebras $\tree$ and $\tlist$, whose constructors $\constr_i^{(\tree, \tlist)}$ for $i \in \{1,\dots, 4\}$ are \[ \leaf^{\typeN \to \tree}, \branch^{\tlist \to \tree}, \emp^{\tlist}, \tcons^{\tree \to \tlist \to \tlist}. \] Observe that the elements of the algebra $\tree$ are just the finitely branching trees, which carry natural numbers on their leaves. Let us compute the types of the recursion operators w.r.t.\ the result types $\tau_1, \tau_2$, i.e.\ of $\rec_{\tree}^{(\tree,\tlist), (\tau_1,\tau_2)}$ and $\rec_{\tlist}^{(\tree,\tlist), (\tau_1,\tau_2)}$, or shortly $\rec_{\tree}$ and $\rec_{\tlist}$. The step types are \begin{align*} \ST_1 &:= \typeN \to \tau_1, \\ \ST_2 &:= \tlist \to \tau_2 \to \tau_1, \\ \ST_3 &:= \tau_2, \\ \ST_4 &:= \tree \to \tlist \to \tau_1 \to \tau_2 \to \tau_2. \end{align*} Hence the types of the recursion operators are \begin{align*} \rec_{\tree} &\colon \ST_1 \to \ST_2 \to \ST_3 \to \ST_4 \to \tree \to \tau_1, \\ \rec_{\tlist} &\colon \ST_1 \to \ST_2 \to \ST_3 \to \ST_4 \to \tlist \to \tau_2. \end{align*} To see a concrete example, let us recursively define addition $+ \colon \tree \to \tree \to \tree$ and $\oplus \colon \tlist \to \tree \to \tlist$. The recursion equations to be satisfied are \begin{alignat*}{2} &+(\leaf\, n) &&= \lambda a a, \\ &+(\branch\,\bs) &&= \lambda a. \branch(\oplus\,\bs\,a), \\[6pt] &\oplus\,\emp &&= \lambda a\,\emp, \\ &\oplus(\tcons\,b\,\bs) &&= \lambda a. \tcons(+\,b\,a)(\oplus\,\bs\,a). \end{alignat*} We define $+$ and $\oplus$ by means of the recursion operators $\rec_{\tree}$ and $\rec_{\tlist}$ with result types \begin{align*} \tau_1 &:= \tree \to \tree, \\ \tau_2 &:= \tree \to \tlist. \end{align*} The step terms are \begin{align*} M_1 &:= \lambda n \lambda a a, \\ M_2 &:= \lambda \bs \lambda g^{\tau_2} \lambda a. \branch(g\,a), \\ M_3 &:= \lambda a\,\emp, \\ M_4 &:= \lambda b \lambda \bs \lambda f^{\tau_1} \lambda g^{\tau_2} \lambda a. \tcons(f\,a)(g\,a). \end{align*} Then \begin{align*} + &:= \rec_{\tree} \vec{M} \colon \tree \to \tree \to \tree, \\ \oplus &:= \rec_{\tlist} \vec{M} \colon \tlist \to \tree \to \tlist. \end{align*} \end{examples*} \begin{remark}\mylabel{R:simpRec} It may happen that in a recursion on simultaneously defined algebras one only needs to recur on some of those algebras. Then we can simplify the type of the recursion operator accordingly, by \begin{itemize} \item omitting all step types $\ST_i^{\vec{\mu}, \vec{\tau}}$ with irrelevant value type $\tau_j$, and \item simplifying the remaining step types by omitting from the types $(\vec{\sigma}_1 \to \tau_{j_1}), \dots, (\vec{\sigma}_n \to \tau_{j_n})$ of previously defined values all those with irrelevant $\tau_{j_{\nu}}$. 
\end{itemize}
In the $\tree,\tlist$-example, if we only want to recur on $\tlist$, then the step types are
\begin{align*} \ST_3 &:= \tau_2, \\ \ST_4 &:= \tree \to \tlist \to \tau_2 \to \tau_2. \end{align*}
Hence the type of the simplified recursion operator is
\begin{align*} \rec_{\tlist} &\colon \ST_3 \to \ST_4 \to \tlist \to \tau_2. \end{align*}
An example is the recursive definition of the length of a $\tlist$. The recursion equations are
\begin{alignat*}{2} &\len{\emp} &&= 0, \\ &\len{\tcons\,b\,\bs} &&= \len{\bs} + 1. \end{alignat*}
The step terms are
\begin{align*} M_3 &:= 0, \\ M_4 &:= \lambda b \lambda \bs \lambda p. p+1. \end{align*}
\end{remark}
\begin{remark}\mylabel{R:Cases} There is an important variant of recursion where no recursive calls occur. This variant is called the \indexentry{cases operator}\index{cases-operator@$\Cases$-operator}; it distinguishes cases according to the outer constructor form. Here all step types have the form
\[ \ST_i^{\vec{\mu}, \vec{\tau}} := \vec{\rho} \to (\vec{\sigma}_1 \to \mu_{j_1}) \to \dots \to (\vec{\sigma}_n \to \mu_{j_n}) \to \tau_j. \]
The intended meaning of the cases operator is given by the conversion rule (cf.\ \eqref{recconv} below)
\[ (\Cases_j \vec{M})^{\mu_j \to \tau_j} (\constr_i^{\vec{\mu}} \vec{N}) \cnv M_i \vec{N}. \]
Notice that only those step terms are used whose value type is the present $\tau_j$; this is due to the fact that there are no recursive calls. Therefore the type of the cases operator is
\[ \Cases_{\mu_j \to \tau_j}^{\vec{\mu}} \colon \ST_{i_1} \to \dots \to \ST_{i_q} \to \mu_j \to \tau_j, \]
where $\ST_{i_1}, \dots, \ST_{i_q}$ consist of all $\ST_i$ with value type $\tau_j$. We write $\Cases_{\mu_j \to \tau_j}$ or even $\Cases_{j}$ for $\Cases_{\mu_j \to \tau_j}^{\vec{\mu}}$. The simplest example (for the type $\typeBool$) is \indexentry{if-then-else}. Another example is the predecessor function on $\typeN$, i.e.\ $\pred(0) := 0$, $\pred(\suc(n)) := n$. It can formally be defined by the term
\[ \Cases_{\typeN \to \typeN}0 (\lambda n n). \]
In the $\tree, \tlist$-example we have
\begin{align*} \Cases_{\tlist \to \tau_2} \colon \tau_2 \to (\tree \to \tlist \to \tau_2) \to \tlist \to \tau_2. \end{align*}
\end{remark}
\begin{remark}\mylabel{R:If} When computing the value of a cases operator, we may not want to (eagerly) evaluate all arguments, but rather evaluate the test argument first and depending on the result (lazily) evaluate at most one of the other arguments. This phenomenon is well known in functional languages; e.g.\ in \textsc{Scheme} the \texttt{if}-construct is called a \indexentry{special form} (as opposed to an operator). Therefore we also provide an \texttt{if}-construct\index{if-construct@\texttt{if}-construct} to build terms, which differs from the cases operator only in that it employs lazy evaluation. The predecessor function could then be written in the form $\lambda m [\texttt{if} \ m\ 0\ \lambda n n]$. \end{remark}
\subsection{Conversion Relation} \mylabel{SS:Conv3} It will be useful to employ the following notation. Let $\vec{\mu} = \mu \vec{\alpha} \,\vec{\kappa}$ and
\[ \kappa_i = \rho_1 \to \dots \to \rho_m \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha}), \]
and consider $\constr_i^{\vec{\mu}} \vec{N}$.
Then we write $\vec{N}^P = N_1^P, \dots, N_m^P$ for the \emph{parameter arguments} $N_1^{\rho_1}, \dots, N_m^{\rho_m}$ and $\vec{N}^R = N_1^R, \dots, N_n^R$ for the \emph{recursive arguments} $N_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}}, \dots, N_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}$, and $n^R$ for the number $n$ of recursive arguments. We define a \indexentry{conversion relation} $\cnv_\rho$ between terms of type $\rho$ by \begin{align} (\lambda xM)N &\cnv \subst{M}{x}{N}\label{betaconv} \\ \pair{M_0}{M_1}i &\cnv Mi \quad \hbox{($i=0,1$)}\label{pairbetaconv} \\ \lambda x.Mx &\cnv M \quad\hbox{if $x \notin \FV(M)$ ($M$ not an abstraction)}\label{etaconv} \\ \pair{M 0}{M 1}&\cnv M \quad\hbox{($M$ not a pair)}\label{pairetaconv} \\ (\rec_j \vec{M})^{\mu_j \to \tau_j} (\constr_i^{\vec{\mu}} \vec{N}) &\cnv M_i \vec{N} \bigl( (\rec_{j_1} \vec{M}) \circ N_1^R\bigr) \dots \bigl( (\rec_{j_n} \vec{M}) \circ N_n^R\bigr) \label{recconv} \end{align} Here we have written $\rec_j$ for $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$. The \emph{one step reduction relation} $\red$ can now be defined as follows. $M \red N$ if $N$ is obtained from $M$ by replacing a subterm $M'$ in $M$ by $N'$, where $M' \cnv N'$. The reduction relations $\redplus$ and $\redstar$ are the transitive and the reflexive transitive closure of $\red$, respectively. For $\vec{M} = M_1,\dots,M_n$ we write $\vec{M} \red \vec{M'}$ if $M_i \red M'_i$ for some $i \in \{1,\dots,n\}$ and $M_j = M'_j$ for all $i \ne j \in \{1,\dots,n\}$. A term $M$ is \emph{normal} (or in \emph{normal form}) if there is no term $N$ such that $M \red N$. Clearly normal closed terms are of the form $\constr_i^{\vec{\mu}} \vec{N}$. \begin{example*} Let us check the conversion rules for the defined $+$ and $\oplus$ of our example above. We have \begin{align*} &+(\leaf\, n) = \rec_{\tree} \vec{M}(\leaf\, n) \cnv M_1 n \cnv \lambda a a, \\ &\oplus\,\emp = \rec_{\tlist} \vec{M} \,\emp \cnv M_3 = \lambda a\,\emp \end{align*} and \begin{align*} +(\branch\,\bs) &= \rec_{\tree} \vec{M}(\branch\,\bs) \\ &\cnv M_2 \,\bs\,\bigl( (\rec_{\tlist} \vec{M}) \circ \bs\bigr) \\ &= M_2 \,\bs\,( \oplus\,\bs) \\ &\red \lambda a. \branch(\oplus\,\bs\,a),\\[6pt] \oplus(\tcons\,b\,\bs) &= \rec_{\tlist} \vec{M}(\tcons\,b\,\bs) \\ &\cnv M_4\,b\,\bs\,\bigl( (\rec_{\tree} \vec{M}) \circ b\bigr) \bigl( (\rec_{\tlist} \vec{M}) \circ \bs\bigr) \\ &= M_4\,b\,\bs\,(+\,b)(\oplus\,\bs) \\ &\red \lambda a. \tcons(+\,b\,a)(\oplus\,\bs\,a). \end{align*} \end{example*} \subsection{Strong Computability Predicates} \mylabel{SS:SC} \begin{definition*} The set $\SN$ of \emph{strongly normalizable} terms is inductively defined by \begin{equation} \label{sndef} (\forall N. M \red N \imp N \in \SN) \imp M \in \SN \end{equation} \end{definition*} Note that with $M$ clearly every subterm of $M$ is strongly normalizable. \begin{definition*} We define \emph{strong computability predicates}\index{strong computability} $\SC^\rho$\index{SC@$\SC$} by induction on $\rho$. \emph{Case} $\mu_j = (\mu \vec{\alpha}\,\vec{\kappa})_j$. Then $M \in \SC^{\mu_j}$ if \begin{align} &\forall N.M \red N \imp N \in \SC,\ \hbox{and}\label{sc1} \\ &M = \constr_i^{\vec{\mu}} \vec{N} \imp \vec{N}^P \in \SC \land \bigland_{p=1}^{n^R}(\forall \vec{K} {\in} \SC)\, N_p^R \vec{K} \in \SC^{\mu_{j_p}}. \label{sc2} \end{align} \emph{Case} $\rho \to \sigma$. \[ M \in \SC^{\rho \to \sigma} \defiff (\forall N{\in} \SC^\rho)\, MN \in \SC^\sigma. \] \emph{Case} $\rho \typeProd \sigma$. 
\[ M \in \SC^{\rho \typeProd \sigma} \defiff \hbox{$M0 \in \SC^\rho$ and $M1 \in \SC^\sigma$.} \] \end{definition*} Notice that the reference to $\vec{N}^P \in \SC$ and $\vec{K} {\in} \SC$ in \eqref{sc2} is legal, because the types $\vec{\rho}, \vec{\sigma}_i$ of $\vec{N}, \vec{K}$ must have been generated \emph{before} $\mu_j$. Note also that by \eqref{sc2} $\constr_i^{\vec{\mu}} \vec{N} \in \SC$ implies $\vec{N} \in \SC$. We now set up a sequence of lemmas leading to a proof that every term is strongly normalizing. In the proofs we disregard the product case, since it can be treated routinely. \begin{lemma} \mylabel{L:onestep} If $M \in \SC^\rho$ and $M \red M'$, then $M' \in \SC$. \end{lemma} \begin{proof} Induction on $\rho$. \emph{Case} $\mu$. By \eqref{sc1}. \emph{Case} $\rho \to \sigma$. Assume $M \in \SC^{\rho \to \sigma}$ and $M \red M'$; we must show $M' \in \SC$. So let $N \in \SC^\rho$; we must show $M'N \in \SC^\sigma$. But this follows from $MN \red M'N$ and $MN \in \SC^\rho$ by induction hypothesis (IH) on $\sigma$. \end{proof} \begin{lemma} \mylabel{L:var} $(\forall \vec{M}{\in} \SN). \vec{M} \in \SC \imp (x \vec{M})^\mu \in \SC$. \end{lemma} \begin{proof} Induction on $\vec{M} \in \SN$. Assume $\vec{M} \in \SN$ and $\vec{M} \in \SC$; we must show $(x \vec{M})^\mu \in \SC$. So assume $x\vec{M} \red N$; we must show $N \in \SC$. Now by the form of the conversion rules $N$ must be of the form $x\vec{M'}$ with $\vec{M} \red \vec{M'}$. But $\vec{M'} \in \SC$ by Lemma~\ref{L:onestep}, hence $x\vec{M'} \in \SC$ by IH for $\vec{M'}$. \end{proof} \begin{lemma}\mylabel{L:sc} \begin{enumeratea} \item $\SC^\rho \subseteq \SN$, \label{L:sc:a} \item $x \in \SC^\rho$. \label{L:sc:b} \end{enumeratea} \end{lemma} \begin{proof} By simultaneous induction on $\rho$. \emph{Case} $\mu_j = (\mu \vec{\alpha}\,\vec{\kappa})_j$. \eqref{L:sc:a}. We show $M \in \SC^{\mu_j} \imp M \in \SN$ by (side) induction on $M \in \SC^{\mu_j}$. So assume $M \in \SC^{\mu_j}$; we must show $M \in \SN$. But for every $N$ with $M \red N$ we have $N \in \SC$ by \eqref{sc1}, hence $N \in \SN$ by the side induction hypothesis SIH. \eqref{L:sc:b}. $x \in \SC^{\mu_j}$ holds trivially. \emph{Case} $\rho \to \sigma$. \eqref{L:sc:a}. Assume $M \in \SC^{\rho \to \sigma}$; we must show $M \in \SN$. By IH\eqref{L:sc:b} for $\rho$ we have $x \in \SC^\rho$, hence $Mx \in \SC^\sigma$, hence $Mx \in \SN$ by IH\eqref{L:sc:a} for $\sigma$. But $Mx \in \SN$ clearly implies $M \in \SN$. \eqref{L:sc:b}. Let $\vec{M} \in \SC^{\vec{\rho}}$ with $\rho_1 = \rho$; we must show $x \vec{M} \in \SC^\mu$. But this follows from Lemma~\ref{L:var}, using IH\eqref{L:sc:a} for $\vec{\rho}$. \end{proof} \begin{corollary} \mylabel{C:scconstr} $\vec{N} \in \SC \imp \constr_i^{\vec{\mu}} \vec{N} \in \SC$, i.e.\ $\constr_i^{\vec{\mu}} \in \SC$. \end{corollary} \begin{proof} First show $(\forall \vec{N} {\in} \SN). \vec{N} \in \SC \imp \constr_i^{\vec{\mu}} \vec{N} \in \SC$ by induction on $\vec{N} \in \SN$ as in Lemma~\ref{L:var}, and then use Lemma~\ref{L:sc}\eqref{L:sc:a}. \end{proof} \begin{lemma} \mylabel{L:scheadmu} \begin{align*} &(\forall M,N,\vec{N} {\in} \SN). \subst{M}{x}{N} \vec{N} \in \SC^\mu \imp (\lambda x M)N \vec{N} \in \SC^\mu. \\ &(\forall M_0,M_1,\vec{N} {\in} \SN). M_0 \vec{N}, M_1 \vec{N} \in \SC^\mu \imp \pair{M_0}{M_1}i \vec{N} \in \SC^\mu. \end{align*} \end{lemma} \begin{proof} By induction on $M,N,\vec{N} \in \SN$. 
Let $M,N,\vec{N} \in \SN$ and assume $\subst{M}{x}{N} \vec{N} \in \SC$; we must show $(\lambda x M)N \vec{N} \in \SC$. Assume $(\lambda x M)N \vec{N} \red K$; we must show $K \in \SC$. \emph{Case} $K = (\lambda x M')N'\vec{N'}$ with $M, N, \vec{N} \red M', N', \vec{N'}$. Then $\subst{M}{x}{N} \vec{N} \redstar \subst{M'}{x}{N'} \vec{N'}$, hence by \eqref{sc1} from our assumption $\subst{M}{x}{N} \vec{N} \in \SC$ we can infer $\subst{M'}{x}{N'} \vec{N'} \in \SC$, therefore $(\lambda x M')N'\vec{N'} \in \SC$ by IH. \emph{Case} $K=\subst{M}{x}{N} \vec{N}$. Then $K \in \SC$ by assumption. \end{proof} \begin{corollary} \mylabel{C:schead} \begin{align*} &(\forall M,N,\vec{N} {\in} \SN). \subst{M}{x}{N} \vec{N} \in \SC^\rho \imp (\lambda x M)N \vec{N} \in \SC^\rho. \\ &(\forall M_0,M_1,\vec{N} {\in} \SN). M_0 \vec{N}, M_1 \vec{N} \in \SC^\rho \imp \pair{M_0}{M_1}i \vec{N} \in \SC^\rho. \end{align*} \end{corollary} \begin{proof} By induction on $\rho$, using Lemma~\ref{L:sc}\eqref{L:sc:a}. \end{proof} \begin{lemma} \mylabel{L:screc} $(\forall N {\in} \SC^{\mu_j}) (\forall \vec{M},\vec{L} {\in} \SN). \vec{M},\vec{L} \in \SC \imp \rec_{j} \vec{M}N \vec{L} \in \SC^\mu$. \end{lemma} \begin{proof} By main induction on $N \in \SC^{\mu_j}$, and side induction on $\vec{M},\vec{L} \in \SN$. Assume \[ \rec_{j} \vec{M}N \vec{L} \red L. \] We must show $L \in \SC$. \emph{Case} 1. $\rec_{j} \vec{M'}N \vec{L'} \in \SC$ by the SIH. \emph{Case} 2. $\rec_{j} \vec{M}N'\vec{L} \in \SC$ by the main induction hypothesis (IH). \emph{Case} 3. $N = \constr_i^{\vec{\mu}} \vec{N}$ and \[ L = M_i \vec{N} \bigl( (\rec_{j} \vec{M}) \circ N_1^R\bigr) \dots \bigl( (\rec_{j} \vec{M}) \circ N_n^R\bigr) \vec{L}. \] $\vec{M},\vec{L} \in \SC$ by assumption. $\vec{N} \in \SC$ follows from $N = \constr_i^{\vec{\mu}} \vec{N} \in \SC$ by \eqref{sc2}. Note that for all recursive arguments $N_p^R$ of $N$ and all strongly computable $\vec{K}$ by \eqref{sc2} we have the IH for $N_p^R \vec{K}$ available. It remains to show $(\rec_{j} \vec{M}) \circ N_p^R = \lambda \vec{x}_p. \rec_{j} \vec{M} (N_p^R \vec{x}_p) \in \SC$. So let $\vec{K}, \vec{Q} \in \SC$ be given. We must show $(\lambda \vec{x}_p. \rec_{j} \vec{M} (N_p^R \vec{x}_p)) \vec{K} \vec{Q} \in \SC$. By IH for $N_p^R \vec{K}$ we have $\rec_{j} \vec{M}(N_p^R \vec{K}) \vec{Q} \in \SC$, since by Lemma~\ref{L:sc}\eqref{L:sc:a} $\vec{K}, \vec{Q} \in \SN$. Now Corollary~\ref{C:schead} yields the claim. \end{proof} \begin{corollary} \mylabel{C:screccor} $\rec_{j} \in \SC$.\qed \end{corollary} \begin{definition*} A substitution $\xi$ is \emph{strongly computable}, if $\xi(x) \in \SC$ for all variables $x$. A term $M$ is \emph{strongly computable under substitution}, if $M \xi \in \SC$ for all strongly computable substitutions $\xi$. \end{definition*} \begin{theorem} Every term is strongly computable under substitution. \end{theorem} \begin{proof} Induction on the term $M$. \emph{Case} $x$. $x\xi \in \SC$, since $\xi$ is strongly computable. \emph{Case} $\constr_i^{\vec{\mu}}$. By Corollary~\ref{C:scconstr}. \emph{Case} $\rec_{j}$. By Corollary~\ref{C:screccor}. \emph{Case} $MN$. By IH $M \xi, N \xi \in \SC$, hence $(MN) \xi = (M \xi)(N \xi) \in \SC$. \emph{Case} $\lambda xM$. Let $\xi$ be a strongly computable substitution; we must show $(\lambda xM) \xi = \lambda xM \xi_x^x \in \SC$. So let $N \in \SC$; we must show $(\lambda xM \xi_x^x)N \in \SC$. By IH $M \xi_x^N \in \SC$, hence $(\lambda xM \xi_x^x)N \in \SC$ by Corollary~\ref{C:schead}. 
\end{proof} \begin{corollary} \mylabel{C:sn} Every term is strongly normalizable. \fini \end{corollary} \section{Rewrite Rules}\mylabel{S:Rew} The elimination constants corresponding to the constructors are called primitive recursion operators $\rec$. They have been described in detail in Section~\ref{S:Rec}. In this setup, every closed term reduces to a numeral. For convenience, we shall also use constants for rather arbitrary computable functionals, and axiomatize them according to their intended meaning by means of rewrite rules. An example is the general fixed point operator $\fix$, which is axiomatized by $\fix F = F(\fix F)$. Clearly then it cannot be true any more that every closed term reduces to a numeral. We may have non-terminating terms, but this just means that not always it is a good idea to try to normalize a term. An important consequence of admitting non-terminating terms is that our notion of proof is not decidable: when checking e.g.\ whether two terms are equal we may run into a non-terminating computation. To avoid this somewhat unpleasant undecidability phenomenon, we may view our proofs as abbreviated forms of full proofs, with certain equality arguments left implicit. If some information sufficient to recover the full proof (e.g.\ for each node a bound on the number of rewrite steps needed to verify it) is stored as part of the proof, then we retain decidability of proofs. However, even without such additional information we still have semi-decidability of proofs, i.e., an algorithm to check the correctness of a proof that can only give correct results, but may not terminate. In practice this is sufficient. We now describe in some detail our concept of rewrite rules. For every program constant $c^\rho$ we assume that some rewrite rules of the form $c\vec{K} \cnv N$ are given, where $\FV(N) \subseteq \FV(\vec{K})$ and $c\vec{K}$, $N$ have the same type (not necessarily a ground type). Moreover, for any two rules $c\vec{K} \cnv N$ and $c\vec{K}' \cnv N'$ we require that $\vec{K}$ and $\vec{K}'$ are of the same length, called the \emph{arity}\index{arity!of a program constant} of $c$. Given a set of rewrite rules, we want to treat some rules - which we call \indexentry{computation rules} - in a different, more efficient way (cf.\ \cite{BergerEberlSchwichtenberg03}). The idea is that a computation rule can be understood as a description of a computation in a suitable \indexentry{semantical model}, provided the syntactic constructors correspond to semantic ones in the model, whereas the other rules describe \emph{syntactic} transformations. In order to define what we mean by computation rules, we need the notion of a \indexentry{constructor pattern}. These are special terms defined inductively as follows. \begin{itemize} \item Every variable is a constructor pattern. \item If $c$ is a constructor and $P_1,\dots,P_n$ are constructor patterns (or projection markers 0 or 1) such that $c \vec{P}$ is of ground type, then $c\vec{P}$ is a constructor pattern. \end{itemize} From the given set of rewrite rules we choose a subset $\Comp$ with the following properties. \begin{itemize} \item If $c\vec{P} \cnv Q \in \Comp$, then $P_1,\dots,P_n$ are constructor patterns or projection markers. \item The rules are left-linear, i.e.\ if $c\vec{P} \cnv Q \in \Comp$, then every variable in $c\vec{P}$ occurs only once in $c\vec{P}$. 
\item The rules are non-overlapping, i.e.~for different rules $c\vec{K} \cnv M$ and
  $c\vec{L} \cnv N$ in $\Comp$ the left hand sides $c\vec{K}$ and $c\vec{L}$ are
  non-unifiable.
\end{itemize}
We write $c\vec{M} \cnv_{\comp} Q$ to indicate that the rule is in $\Comp$.  All other rules
will be called (proper) rewrite rules, written $c\vec{M} \cnv_{\rew} K$.  In our reduction
strategy computation rules will always be applied first, and since they are non-overlapping,
this part of the reduction is unique.  However, since we allowed almost arbitrary rewrite
rules, it may happen that, in case no computation rule applies, a term can be rewritten by
different rules $\notin \Comp$.  In order to obtain a deterministic procedure we then select
the first applicable rewrite rule (this is a slight simplification of
\cite{BergerEberlSchwichtenberg03}, where special functions $\select_c$ were used for this
purpose).

% \subsection{Logic with Non-Denoting Terms}\mylabel{SS:E-logic}
%
% The fact that in our arithmetic not every term normalizes (or better:
% that normalization will not terminate for some terms) can be viewed
% logically in such a way that we allow non-denoting terms.

\section{Axioms}\mylabel{S:Axioms}

The intended model of our theory is a many-sorted structure, with one sort for every type.
We assume that the model consists of \emph{domains}\index{domain}, in the sense of domain
theory (cf.\ \cite{Stoltenberg94}).  The reason for this setting is that we want to deal with
\emph{computable functionals}.  Since their (mathematically correct) domains are the
Scott-Ershov partial continuous functionals, this is the intended range of the quantifiers.

\subsection{Languages for Algebras}\mylabel{SS:LangAlg}

We now define the specific logical language we use for our algebras.  It should be thought of
as a form of arithmetical language, since it is supposed to describe our particular intended
model.  A \indexentry{variable} of a given type is interpreted by a continuous functional
(object) of that type.  We use the word \inquotes{variable} and not \inquotes{program
variable}, since continuous functionals are not necessarily computable.  So for each type
$\rho$ we have \emph{general variables} \index{variable!general}
$\hat{x}^{\rho}, \hat{y}^{\rho}, \dots$ of type $\rho$.

In most cases we need to argue about existing (i.e.\ total) objects only.  For the notion of
totality we have to refer to \cite[Chapter 8.3]{Stoltenberg94}; particularly relevant here is
exercise 8.5.7.  To make formal arguments with quantifiers relativized to total objects more
manageable, we use a special sort of variables intended to range over such objects only.  So
for each type $\rho$ we have \emph{total variables} \index{variable!total}
$x^{\rho}, y^{\rho}, \dots$ of type $\rho$.

For readable in- and output, and also for ease in parsing, we may reserve certain strings as
names for variables of a given type, e.g.\ $n,m$ for variables of type $\typeN$.  Then also
$n_0, n_1, n_2, \dots, m_0, \dots$ can be used for the same purpose.  For example,
$n_0, n_1, n_2, \dots, m_0, \dots$ range over total natural numbers, and
$\hat{n}_0, \hat{n}_1, \hat{n}_2, \dots$ are general variables.  We say that the
\emph{degree of totality}\index{degree of totality} for the former is $1$, and for the
latter $0$.

A \indexentry{predicate variable} $\hat{P}$ of arity\index{arity!of a predicate variable}
$\rho_1, \dots, \rho_n$ is a placeholder for a formula $A$ with distinguished (different)
variables $\hat{x}_1, \dots, \hat{x}_n$ of types $\rho_1, \dots, \rho_n$.
Such an entity is called a \indexentry{comprehension term}, written
$\set{\hat{x}_1, \dots, \hat{x}_n}{A}$.  By default we have the predicate variable
$\falsum$\index{bottom} (of empty arity), called (logical) \indexentry{falsity}.  It is viewed
as a predicate variable rather than a predicate constant, since (when translating a classical
proof into a constructive one) we want to substitute for $\falsum$.

Often we will argue about \emph{Harrop formulas}\index{Harrop formula} only, i.e.\ formulas
without computational content.  For convenience we use a special sort of predicate variables
intended to range over comprehension terms with Harrop formulas only.  For example,
$P_0, P_1, P_2, \dots, Q_0, \dots$ range over comprehension terms with Harrop formulas, and
$\hat{P}_0, \hat{P}_1, \hat{P}_2, \dots$ are general predicate variables.  We say that the
\emph{Harrop degree}\index{Harrop degree} for the former is $1$, and for the latter $0$.

We also allow predicate constants with a fixed intended meaning.  Predicate variables and
constants are both called predicate symbols.  The need for predicate constants comes up when
e.g.\ an inductively defined set is expressed via a formula stating the existence of a
generation tree; the kernel of this formula is to be axiomatized, using the tree constructors.
Prime formulas built from predicate constants do not give rise to extracted terms, and cannot
be substituted for.  Specific predicate constants are
\begin{itemize}
\item $\atom$ of arity $(\typeBool)$,
\item for every type $\rho$ \indexentry{equality} $\approx_{\rho}$ of arity $(\rho, \rho)$, and
\item for every type $\rho$
%  \indexentry{existence} $E_{\rho}$ and
  \indexentry{totality} $\Total_{\rho}$ of arity $(\rho)$.
\end{itemize}
Notice that for finitary algebras, e.g.\ $\typeN$, we have continuous boolean valued functions
equality $=_{nat} \colon \typeN \to \typeN \to \typeBool$ and existence (definedness,
totality) $e_{nat} \colon \typeN \to \typeBool$.  Then we can express equality $r=s$ by
$\atom(\eqrel{r}{s})$ and existence $E(r)$ by $\atom(e(r))$.

A prime formula\index{formula!prime} has the form $P(r_1, \dots, r_n)$ with a predicate
variable or constant $P$ and terms $r_1, \dots, r_n$.  Write
\begin{itemize}
\item $T$, $F$ for $\atom(\true)$, $\atom(\false)$,
\item $r=s$ for $\atom(\eqrel{r}{s})$,
\item $E(r)$ for $\atom(e(r))$, and
\item $r \approx s$ for $\approx(r,s)$.
\end{itemize}
\emph{Formulas}\index{formula} are built from prime formulas by
\begin{itemize}
\item implication $A \to B$,
\item conjunction $A \land B$,
\item tensor $A \ltensor B$,
\item all quantification $\forall \hat{x}^{\rho} A$ and
\item existential quantification $\ex \hat{x}^{\rho} A$.
\end{itemize}
Moreover we have classical existential quantification in an arithmetical and a logical form:
\begin{alignat*}{2}
  &\exca \hat{x}_1 \dots \hat{x}_n. A_1 \ltensor \dots \ltensor A_m\index{exca@\texttt{exca}}
  &\quad& \text{arithmetical version} \\
  &\excl \hat{x}_1 \dots \hat{x}_n. A_1 \ltensor \dots \ltensor A_m\index{excl@\texttt{excl}}
  && \text{logical version.}
\end{alignat*}
For all quantifiers we allow that the quantified variable is formed without \verb#^#,
i.e.\ ranges over total objects only.

\begin{remark*}
When dealing with the classical existential quantifier, it is -- for obvious reasons -- useful
to be able to unfold and fold it as it seems appropriate.  In \textsc{Minlog} the commands are
\texttt{fold-formula} and \texttt{unfold-formula}.  However, notice that there are some slight
difficulties in this context.
To see the problems, consider
\[
  \excl x,y A
\]
If we read this as $\excl x \excl y A$, then the unfolded form would be
\[
  \neg \forall x \neg \neg \forall y \neg A
\]
However, it would be simpler if we could unfold this formula into the equivalent
\[
  \neg \forall x \forall y \neg A
\]
To achieve this effect, we allow lists of variables after a classical existential quantifier,
and unfold $\excl x,y A$ into the latter (shorter) formula, but $\excl x \excl y A$ into the
former.

Another (small) problem arises when we want to fold
\begin{equation}\label{Eq:classex}
  \neg \forall x. A \to B \to \falsum
\end{equation}
The result should be $\excl x. A \land B$, but this is not quite correct, since the latter
formula unfolds into $\neg \forall x. A \land B \to \falsum$.  Therefore in \textsc{Minlog}
there is a connective called \indexentry{tensor} (written $\ltensor$) with the property that
\eqref{Eq:classex} folds into
\[
  \excl x. A \ltensor B
\]
and unfolds again into \eqref{Eq:classex}.
\end{remark*}

Formulas can be \emph{unfolded}\index{formula!unfolded} in the sense that all classical
existential quantifiers are replaced according to their definition
\begin{align*}
  \exca \hat{x}_1 \dots \hat{x}_n. A_1 \ltensor \dots \ltensor A_m
  &:= (\forall \hat{x}_1 \dots \hat{x}_n. A_1 \to \dots \to A_m \to F) \to F \\
  \excl \hat{x}_1 \dots \hat{x}_n. A_1 \ltensor \dots \ltensor A_m
  &:= (\forall \hat{x}_1 \dots \hat{x}_n. A_1 \to \dots \to A_m \to \falsum) \to \falsum
\end{align*}
Conversely, a formula can be \emph{folded}\index{formula!folded} in the sense that classical
existential quantifiers are introduced wherever possible.

\emph{Comprehension terms}\index{comprehension term} have the form $\set{\vec{x}}{A}$; note
that the formula $A$ may contain further free variables.

\subsection{Algebras and Totality}\mylabel{SS:AlgTot}

We use the natural numbers as a prototypical finitary algebra; recall Figure~\ref{F:nat}.
Assume that $n$, $p$ are variables of type $\typeN$, $\typeBool$.  Let $\approx$ denote the
equality relation in the model.  Recall the domain of type $\typeBool$, consisting of $\true$,
$\false$ and the bottom element $\bottom$.  The boolean valued functions equality
$=_{nat} \colon \typeN \to \typeN \to \typeBool$ and existence (definedness, totality)
$e_{nat} \colon \typeN \to \typeBool$ need to be continuous.  So we have
\begin{align*}
  \eqrel{0}{0} &\approx \true \\
  \eqrel{0}{S \hat{n}} \approx \eqrel{S \hat{n}}{0} &\approx \false
  &e(0) &\approx \true \\
  \eqrel{S \hat{n}_1}{S \hat{n}_2} &\approx \eqrel{\hat{n}_1}{\hat{n}_2}
  &e(S \hat{n}) &\approx e(\hat{n}) \\
  \eqrel{\bottom_{nat}}{\hat{n}} \approx \eqrel{\hat{n}}{\bottom_{nat}} &\approx \bottom
  &e(\bottom_{\typeN}) &\approx \bottom \\
  \eqrel{\infty_{nat}}{\hat{n}} \approx \eqrel{\hat{n}}{\infty_{nat}} &\approx \bottom
  &e(\infty_{\typeN}) &\approx \bottom
\end{align*}
% Write $T$, $F$ for $\atom(\true)$, $\atom(\false)$, $r=s$ for
% $\atom(\eqrel{r}{s})$ and $E(r)$ for $\atom(e(r))$.
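For instance, unfolding the above equations step by step gives
\begin{align*}
  e(S(S0)) &\approx e(S0) \approx e(0) \approx \true,
  &e(S\bottom_{\typeN}) &\approx e(\bottom_{\typeN}) \approx \bottom, \\
  \eqrel{S0}{S(S0)} &\approx \eqrel{0}{S0} \approx \false,
  &\eqrel{S0}{S\bottom_{\typeN}} &\approx \eqrel{0}{\bottom_{\typeN}} \approx \bottom,
\end{align*}
so on total arguments both functions return total booleans, while a partial argument
propagates to a partial value.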
We stipulate as axioms
\begin{alignat*}{2}
  &T &\quad&\texttt{Truth-Axiom}\index{Truth-Axiom@\texttt{Truth-Axiom}} \\[1ex]
  &\hat{x} \approx \hat{x} &\quad&\text{\texttt{Eq-Refl}\index{Eq-Refl@\texttt{Eq-Refl}}} \\
  &\hat{x}_1 \approx \hat{x}_2 \to \hat{x}_2 \approx \hat{x}_1
  &\quad&\text{\texttt{Eq-Symm}\index{Eq-Symm@\texttt{Eq-Symm}}} \\
  &\hat{x}_1 \approx \hat{x}_2 \to \hat{x}_2 \approx \hat{x}_3 \to \hat{x}_1 \approx \hat{x}_3
  &\quad&\text{\texttt{Eq-Trans}\index{Eq-Trans@\texttt{Eq-Trans}}} \\[1ex]
  &\forall \hat{x} \hat{f}_1 \hat{x} \approx \hat{f}_2 \hat{x} \to \hat{f}_1 \approx \hat{f}_2
  &&\text{\texttt{Eq-Ext}\index{Extensionality@\texttt{Extensionality}}} \\
  &\hat{x}_1 \approx \hat{x}_2 \to \hat{P}(\hat{x}_1) \to \hat{P}(\hat{x}_2)
  &\quad&\text{\texttt{Eq-Compat}\index{Compatibility@\texttt{Compatibility}}} \\[1ex]
  &\forall \hat{x}_1, \hat{x}_2 \hat{P}(\pair{\hat{x}_1}{\hat{x}_2}) \to \forall \hat{p} \hat{P}(\hat{p})
  &\quad&\text{\texttt{Pair-Elim}\index{Pair-Elim@\texttt{Pair-Elim}}}
% \\[1ex]
% &\hat{n}_1 \approx \hat{n}_2 \to E(\hat{n}_1) \to E(\hat{n}_2) \to
% \hat{n}_1 = \hat{n}_2
% &&\text{\texttt{Eq-to-=}}\index{Eq-to-=@\texttt{Eq-to-=}}
% \\
% &\hat{n}_1 = \hat{n}_2 \to \hat{n}_1 \approx \hat{n}_2
% &&\text{\texttt{=-to-Eq}}\index{equals-to-Eq@\texttt{=-to-Eq}}
% \\[1ex]
% &\Total(\hat{n}) \to E(\hat{n})
% &&\text{\texttt{Total-to-E}}\index{Total-to-E@\texttt{Total-to-E}}
% \\
% &E(\hat{n}) \to \Total(\hat{n})
% &&\text{\texttt{E-to-Total}}\index{E-to-Total@\texttt{E-to-Total}}
% \\[1ex]
% &c_1 \vec{\hat{x}}_1 \approx c_2 \vec{\hat{x}}_2 \to F
% &&\text{\texttt{Constr-Disjoint}}%
% \index{Constr-Disjoint@\texttt{Constr-Disjoint@}}
% \\
% &c \vec{\hat{x}}_1 \approx c \vec{\hat{x}}_2 \to \hat{x}_{1i} \approx \hat{x}_{2i}
% &&\text{\texttt{Constr-Inj}}%
% \index{Constr-Inj@\texttt{Constr-Inj@}}
  \\[1ex]
  &\Total_{\rho \to \sigma}(\hat{f}) \leftrightarrow \forall \hat{x}. \Total_{\rho}(\hat{x}) \to \Total_{\sigma}(\hat{f} \hat{x})
  &&\text{\texttt{Total}}\index{Total@\texttt{Total}} \\
  &\Total_{\rho}(c)
  &&\text{\texttt{Constr-Total}}\index{Constr-Total@\texttt{Constr-Total}} \\
  &\Total(c \vec{\hat{x}}) \to \Total(\hat{x}_i)
  &&\text{\texttt{Constr-Total-Args}}\index{Constr-Total-Args@\texttt{Constr-Total-Args}}
% \\
% &\Total_{\rho}(\bottom) \to F
% &&\text{\texttt{Bottom-not-Total}}%
% \index{Bottom-Not-Total@\texttt{Bottom-Not-Total}}
  \\
\intertext{and for every finitary algebra, e.g.\ \texttt{nat}}
  &\hat{n}_1 \approx \hat{n}_2 \to E(\hat{n}_1) \to \hat{n}_1 = \hat{n}_2
  &&\text{\texttt{Eq-to-=-1-nat}\index{Eq-to-=-1-nat@\texttt{Eq-to-=-1-nat}}} \\
  &\hat{n}_1 \approx \hat{n}_2 \to E(\hat{n}_2) \to \hat{n}_1 = \hat{n}_2
  &&\text{\texttt{Eq-to-=-2-nat}\index{Eq-to-=-2-nat@\texttt{Eq-to-=-2-nat}}} \\
  &\hat{n}_1 = \hat{n}_2 \to \hat{n}_1 \approx \hat{n}_2
  &&\text{\texttt{=-to-Eq-nat}\index{equals-to-Eq-nat@\texttt{=-to-Eq-nat}}} \\
  &\hat{n}_1 = \hat{n}_2 \to E(\hat{n}_1)
  &&\text{\texttt{=-to-E-1-nat}\index{equals-to-E-1-nat@\texttt{=-to-E-1-nat}}} \\
  &\hat{n}_1 = \hat{n}_2 \to E(\hat{n}_2)
  &&\text{\texttt{=-to-E-2-nat}\index{equals-to-E-2-nat@\texttt{=-to-E-2-nat}}} \\
  &\Total(\hat{n}) \to E(\hat{n})
  &&\text{\texttt{Total-to-E-nat}\index{Total-to-E-nat@\texttt{Total-to-E-nat}}} \\
  &E(\hat{n}) \to \Total(\hat{n})
  &&\text{\texttt{E-to-Total-nat}\index{E-to-Total-nat@\texttt{E-to-Total-nat}}}
\end{alignat*}
Here $c$ is a constructor.
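To illustrate how the finitary-algebra axioms combine (a derived observation, not an
additional axiom): from $\Total(\hat{n}_1)$ and $\hat{n}_1 \approx \hat{n}_2$ we obtain
$E(\hat{n}_1)$ by \texttt{Total-to-E-nat}, hence $\hat{n}_1 = \hat{n}_2$ by
\texttt{Eq-to-=-1-nat}, hence $E(\hat{n}_2)$ by \texttt{=-to-E-2-nat} and finally
$\Total(\hat{n}_2)$ by \texttt{E-to-Total-nat}; in short
\[
  \Total(\hat{n}_1) \to \hat{n}_1 \approx \hat{n}_2 \to \Total(\hat{n}_2).
\]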
Notice that in $\Total(c \vec{\hat{x}}) \to \Total(\hat{x}_i)$, the type of $c \vec{\hat{x}}$
need not be a finitary algebra, and hence $\hat{x}_i$ may have a function type.
% Further notice that $\Total_{\rho}(\bottom) \to F$ is also necessary
% for $\rho$ an infinitary ground type.

\begin{remark*}
$(E(\hat{n}_1) \to \hat{n}_1 = \hat{n}_2) \to (E(\hat{n}_2) \to \hat{n}_1 = \hat{n}_2) \to
\hat{n}_1 \approx \hat{n}_2$ is \emph{not} valid in our intended model (see
Figure~\ref{F:nat}), since we have \emph{two} distinct undefined objects $\bottom_{nat}$ and
$\infty_{nat}$.
\end{remark*}

We abbreviate
\begin{alignat*}{2}
  &\forall \hat{x}. \Total_{\rho}(\hat{x}) \to A &\quad\hbox{by} \quad& \forall x A, \\
  &\exists \hat{x}. \Total_{\rho}(\hat{x}) \land A &\quad\hbox{by} \quad& \exists x A.
\end{alignat*}
Formally, these abbreviations appear as axioms
\begin{alignat*}{2}
  &\forall x \hat{P}(x) \to \forall \hat{x}. \Total(\hat{x}) \to \hat{P}(\hat{x})
  &\quad&\texttt{All-AllPartial}\index{All-AllPartial@\texttt{All-AllPartial}} \\
  &(\forall \hat{x}. \Total(\hat{x}) \to \hat{P}(\hat{x})) \to \forall x \hat{P}(x)
  &\quad&\texttt{AllPartial-All}\index{AllPartial-All@\texttt{AllPartial-All}} \\
  &\exists x \hat{P}(x) \to \exists \hat{x}. \Total(\hat{x}) \land \hat{P}(\hat{x})
  &\quad&\texttt{Ex-ExPartial}\index{Ex-ExPartial@\texttt{Ex-ExPartial}} \\
  &(\exists \hat{x}. \Total(\hat{x}) \land \hat{P}(\hat{x})) \to \exists x \hat{P}(x)
  &\quad&\texttt{ExPartial-Ex}\index{ExPartial-Ex@\texttt{ExPartial-Ex}} \\
\intertext{and for every finitary algebra, e.g.\ \texttt{nat}}
  &\forall n \hat{P}(n) \to \forall \hat{n}. E(\hat{n}) \to \hat{P}(\hat{n})
  &\quad&\texttt{All-AllPartial-nat}\index{All-AllPartial-nat@\texttt{All-AllPartial-nat}} \\
  &(\exists \hat{n}. E(\hat{n}) \land \hat{P}(\hat{n})) \to \exists n \hat{P}(n)
  &\quad&\texttt{ExPartial-Ex-nat}\index{ExPartial-Ex-nat@\texttt{ExPartial-Ex-nat}}
\end{alignat*}
Notice that \texttt{AllPartial-All-nat}\index{AllPartial-All-nat@\texttt{AllPartial-All-nat}},
i.e.\ $(\forall \hat{n}. E(\hat{n}) \to \hat{P}(\hat{n})) \to \forall n \hat{P}(n)$ is
provable (since $E(n) \cnv T$).  Similarly,
\texttt{Ex-ExPartial-nat}\index{Ex-ExPartial-nat@\texttt{Ex-ExPartial-nat}}, i.e.\
$\exists n \hat{P}(n) \to \exists \hat{n}. E(\hat{n}) \land \hat{P}(\hat{n})$ is provable.

Finally we have axioms for the existential quantifier
\begin{alignat*}{2}
  &\forall \hat{x}^\alpha. \hat{P}(\hat{x}) \to \exists \hat{x}^\alpha \hat{P}(\hat{x})
  &\quad&\text{\texttt{Ex-Intro}\index{Ex-Intro@\texttt{Ex-Intro}}} \\
  &\exists \hat{x}^\alpha \hat{P}(\hat{x}) \to (\forall \hat{x}^\alpha. \hat{P}(\hat{x}) \to \hat{Q}) \to \hat{Q}
  &\quad&\text{\texttt{Ex-Elim}\index{Ex-Elim@\texttt{Ex-Elim}}}
\end{alignat*}

\subsection{Induction}\mylabel{SS:Ind}

We now spell out what we mean by induction\index{induction} over simultaneous free algebras
$\vec{\mu} = \mu\vec{\alpha} \,\vec{\kappa}$, with goal formulas
$\forall x_j^{\mu_j}\,\hat{P}_j(x_j)$.  For the constructor type
\[
  \kappa_i = \vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to
  (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha})
\]
we have the \emph{step formula}
\begin{align*}
  D_i := \forall y_1^{\rho_1},\dots,y_m^{\rho_m},
  y_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}},\dots, y_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}.
  &\forall \vec{x}^{\vec{\sigma}_1} \, \hat{P}_{j_1}(y_{m+1} \vec{x}) \to \dots \to \\
  &\forall \vec{x}^{\vec{\sigma}_n} \, \hat{P}_{j_n}(y_{m+n} \vec{x}) \to \\
  &\hat{P}_j(\constr_i^{\vec{\mu}}(\vec{y})).
\end{align*} Here $\vec{y} = y_1^{\rho_1},\dots,y_m^{\rho_m}, y_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}},\dots, y_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}$ are the \emph{components} of the object $\constr_i^{\vec{\mu}}(\vec{y})$ of type $\mu_j$ under consideration, and \[ \forall \vec{x}^{\vec{\sigma}_1} \, \hat{P}_{j_1}(y_{m+1} \vec{x}), \dots, \forall \vec{x}^{\vec{\sigma}_n} \, \hat{P}_{j_n}(y_{m+n} \vec{x}) \] are the hypotheses available when proving the induction step. The induction axiom $\ind_{\mu_j}^{\vec{x}, \vec{A}}$\index{Ind@\texttt{Ind}} with $\vec{x} = (x_j^{\mu_j})_{j=1, \dots, N}$ and $\vec{A} = (A_j)_{j=1, \dots, N} = (\hat{P}_j(x_j^{\mu_j}))_{j=1, \dots, N}$ then proves the formula \[ D_1 \to \dots \to D_k \to \forall x_j^{\mu_j}\,\hat{P}_j(x_j). \] We will often write $\ind_j^{\vec{x}, \vec{A}}$ for $\ind_{\mu_j}^{\vec{x}, \vec{A}}$, and omit the upper indices $\vec{x}, \vec{A}$ when they are clear from the context. In case of a non-simultaneous free algebra, i.e.\ of type $\mu \alpha\,\kappa$, for $\ind_\mu^{x, A}$ we normally write $\ind_{x,A}$. \begin{examples*} \begin{alignat*}{2} &\ind_{p, A} \colon \subst{A}{p}{\true} \to \subst{A}{p}{\false} \to \forall p^{\typeBool} A, \\ &\ind_{n, A} \colon \subst{A}{n}{0} \to (\forall n. A \to \subst{A}{n}{\suc n}) \to \ \forall n^{\typeN} A, \\ &\ind_{l, A} \colon \subst{A}{l}{\nil} \to (\forall x,l. A \to \subst{A}{l}{\cons(x,l)}) \to \forall l^{\typeL{\alpha}} A \\ &\ind_{x, A} \colon \forall y_1 \subst{A}{x}{\termSumIntroLeft(y_1)} \to \forall y_2 \subst{A}{x}{\termSumIntroRight(y_2)} \to \forall x^{\rho_1 \typeSum \rho_2} A. \end{alignat*} For the simultaneously defined algebras $\tree$ and $\tlist$ the induction axiom $\ind_{\tree}^{b,\bs,\hat{P}_1(b), \hat{P}_2(\bs)}$ is \begin{alignat*}{2} &D_1 \to D_2 \to D_3 \to D_4 \to \forall b^\tree \hat{P}_1(b) \\ \intertext{with} &D_1 := \forall n \hat{P}_1(\leaf(n)), \\ &D_2 := \forall \bs^{\tlist}. \hat{P}_2(\bs) \to \hat{P}_1(\branch(\bs)), \\ &D_3 := \hat{P}_2(\emp), \\ &D_4 := \forall b^{\tree},\bs^{\tlist}. \hat{P}_1(b) \to \hat{P}_2(\bs) \to \hat{P}_2(\tcons(b,\bs)). \end{alignat*} \end{examples*} \begin{remark}\mylabel{R:simpInd} It may happen that in an induction on simultaneously defined algebras one only needs to induct on some of those algebras. Then we can simplify the induction formula accordingly, by \begin{itemize} \item omitting all step formulas $D_i$ corresponding to constructor types with irrelevant value type $\tau_j$, and \item simplifying the remaining step formulas by omitting from the induction hypotheses $\forall \vec{x}^{\vec{\sigma}_1} \, \hat{P}_{j_1}(y_{m+1} \vec{x})$, \dots, $\forall \vec{x}^{\vec{\sigma}_n}\,\hat{P}_{j_n}(y_{m+n} \vec{x})$ all those corresponding to constructor types with irrelevant value type $\tau_{j_{\nu}}$. \end{itemize} In the $\tree,\tlist$-example, if we only want to induct on $\tlist$, then the step formulas are \begin{align*} &D_3 := \hat{P}_2(\emp), \\ &D_4 := \forall b^{\tree},\bs^{\tlist}. \hat{P}_2(\bs) \to \hat{P}_2(\tcons(b,\bs)). \end{align*} Hence the simplified induction axiom is \begin{align*} \ind_{\bs,\hat{P}_2(\bs)} &\colon D_3 \to D_4 \to \forall \bs^{\tlist} \hat{P}_2(\bs). 
\end{align*}
\end{remark}

\subsection{Cases}\mylabel{SS:Cases}

% \begin{remark}\mylabel{R:CasesAxiom}
There is an important variant of the induction axiom, where no induction hypotheses are used,
i.e.\ all step formulas have the form
\[
  D_i := \forall y_1^{\rho_1},\dots,y_m^{\rho_m},
  y_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}},\dots, y_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}.
  \hat{P}_j(\constr_i^{\vec{\mu}}(\vec{y})).
\]
This variant is called the \emph{cases axiom}\index{cases
axiom}\index{cases-axiom@$\CasesAxiom$-axiom}; it distinguishes cases according to the outer
constructor form.  The formula of the cases axiom is
\[
  \CasesAxiom_{x_j, \hat{P}_j(x_j)} \colon
  D_{i_1} \to \dots \to D_{i_q} \to \forall x_j^{\mu_j} \hat{P}_j(x_j),
\]
where $D_{i_1}, \dots, D_{i_q}$ consists of all $D_i$ concerning constructors for $\mu_j$.
Examples are
\begin{alignat*}{2}
  &\CasesAxiom_{n, A} \colon \subst{A}{n}{0} \to \forall n \subst{A}{n}{\suc n} \to \
  \forall n^{\typeN} A, \\
  &\CasesAxiom_{l, A} \colon \subst{A}{l}{\nil} \to \forall x,l \subst{A}{l}{\cons(x,l)} \to
  \forall l^{\typeL{\alpha}} A.
\end{alignat*}
% Again it may happen that in an argument by cases on simultaneously
% defined algebras one only needs to consider some of those algebras.
% Then we can simplify the cases axiom accordingly, by
% \begin{itemize}
%
% \item omitting all step formulas $D_i$ corresponding to constructor
% types with irrelevant value type $\tau_{j_{\nu}}$.
%
% \end{itemize}
In the $\tree,\tlist$-example, if we want to distinguish cases on $\tlist$, then the step
formulas are
\begin{align*}
  D_3 &:= \hat{P}(\emp), \\
  D_4 &:= \forall b^{\tree} \forall \bs^{\tlist} \hat{P}(\tcons(b,\bs)).
\end{align*}
Hence the cases axiom is
\begin{align*}
  \CasesAxiom_{\bs,\hat{P}(\bs)} &\colon \hat{P}(\emp) \to
  \forall b^{\tree} \forall \bs^{\tlist} \hat{P}(\tcons(b,\bs)) \to
  \forall \bs^{\tlist} \hat{P}(\bs).
\end{align*}
% \end{remark}

\begin{comment}
\begin{example*}
Consider the two formulas
\begin{align*}
  &\forall b^{\tree} \ex \bs_0^{\tlist}.
  \mathsf{FlatTlist\,\bs_0 \land \mathsf{LeavesTree\,b = \mathsf{LeavesTlist\,bs_0 \\
  &\forall \bs^{\tlist} \ex \bs_0^{\tlist}.
  \mathsf{FlatTlist\,\bs_0 \land \mathsf{LeavesTlist\,\bs = \mathsf{LeavesTlist\,bs_0
\end{align*}
Here are the recursive definitions of the predicates involved.
\begin{align*}
  \mathsf{FlatTlist\,\emp &:= \true \\
  \mathsf{FlatTlist}(\tcons\, b\, \bs) &:= \mathsf{And}(\mathsf{LeafPred\,b) (\mathsf{FlatTlist\,\bs) \\[6pt]
  \mathsf{LeafPred}(\leaf\, n) &:= \true \\
  \mathsf{LeafPred}(\branch\, \bs) &:= \false \\[6pt]
  \mathsf{LeavesTree}(\leaf\, n) &:= 1 \\
  \mathsf{LeavesTree}(\branch\, \bs) &:= \mathsf{LeavesTlist\,\bs \\[6pt]
  \mathsf{LeavesTlist\,\emp &:= 0 \\
  \mathsf{LeavesTlist}(\tcons\, b\, \bs) &:= (\mathsf{LeavesTree\,b) + (\mathsf{LeavesTlist\,\bs)
\end{align*}
\end{example*}
\end{comment}

\section{Notes}\mylabel{S:AlgNotes}

Section~\ref{S:Rec} is based on an extension of Tait's\index{Tait} method of strong
computability predicates.  The definition of these predicates and also the proof are related
to Zucker's\index{Zucker} proof of strong normalization of his term system for recursion on
the first three number or tree classes.  However, Zucker uses a combinatory term system and
defines strong computability for closed terms only.  Following some ideas in an unpublished
note of Berger\index{Berger}, Benl\index{Benl} (in his diploma thesis \cite{Benl98}) adapted
this proof to terms in the simply typed $\lambda$-calculus, possibly involving free variables.
Here this proof is extended to the case of simultaneously defined free algebras.  In a recent
paper of Abel\index{Abel} and Altenkirch \index{Altenkirch} \cite{AbelAltenkirch00}, a similar
result is proved with a different method, involving Aczel's\index{Aczel} notion of a set-based
relation.  It seems worthwhile to verify that an appropriate variant of the standard Tait
proof also yields this result.  However, an additional merit of the method of Abel and
Altenkirch is that they are also able to treat co-inductive types.  We have not tried to
extend ours in this direction.

\chapter{Unification and Proof Search}
% Sources: search00/s.tex and proofth/ss01/PT01/logic.tex
% Modification here: Huet's unification algorithm
\mylabel{C:SearchHO}

We describe a proof search method suitable for minimal logic with higher order functionals.
It is based on Huet's \cite{Huet75} unification algorithm for the simply typed lambda
calculus, which is treated first.  Huet's unification algorithm does not terminate in general;
this cannot be avoided, since it is well known that higher order unification is undecidable.
This non-termination can be avoided if we restrict ourselves to a certain fragment of higher
order (simply typed) minimal logic.  This fragment is determined by requiring that every
higher order variable $Y$ can only occur in a context $Y \vec{x}$, where $\vec{x}$ are
distinct bound variables in the scope of the operator binding $Y$, and of opposite polarity.
Note that for first order logic this restriction does not mean anything, since there are no
higher order variables.  However, when designing a proof search algorithm for first order
logic only, one is naturally led into this fragment of higher order logic, where the algorithm
works as well.

\section{Huet's Unification Algorithm}
\mylabel{S:Huet}

We work in the simply typed $\lambda$-calculus, with the usual conventions.  For instance,
whenever we write a term we assume that it is correctly typed.
\emph{Substitutions}\index{substitution} are denoted by $\varphi, \psi, \rho$.  The result of
applying a substitution $\varphi$ to a term $r$ or a formula $A$ is written as $r \varphi$ or
$A \varphi$, with the understanding that after the substitution all terms are brought into
long normal form.

$Q$ always denotes a $\forall \exists \forall$-prefix, say
$\forall \vec{x} \exists \vec{y} \forall \vec{z}$, with distinct variables.  We call $\vec{x}$
the \emph{signature variables}\index{variable!signature}, $\vec{y}$ the \emph{flexible
variables}\index{variable!flexible} and $\vec{z}$ the \emph{forbidden
variables}\index{variable!forbidden} of $Q$, and write $Q_\exists$ for the existential part
$\exists \vec{y}$ of $Q$.  A \emph{$Q$-term}\index{Q-term@$Q$-term} is a term with all its
free variables in $Q$, and similarly a \emph{$Q$-formula}\index{Q-formula@$Q$-formula} is a
formula with all its free variables in $Q$.  A
\emph{$Q$-substitution}\index{Q-substitution@$Q$-substitution} is a substitution of $Q$-terms.

A \indexentry{unification problem} $\C{U}$ consists of a $\forall \exists \forall$-prefix $Q$
and a conjunction $C$ of equations between $Q$-terms of the same type,
i.e.\ $\bigland_{i=1}^n r_i = s_i$.  We may assume that each such equation is of the form
$\lambda \vec{x} r = \lambda \vec{x} s$ with the same $\vec{x}$ (which may be empty) and
$r, s$ of ground type.
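As a concrete (purely illustrative) instance, take a prefix $Q = \forall f \exists u \forall z$
where the signature variable $f$ takes two arguments of ground type and the flexible variable
$u$ takes one; then the single equation
\[
  u z = f z z
\]
is a unification problem with one unification pair, and assigning $\lambda x. f x x$ to $u$
makes both sides normalize to $f z z$.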
A \indexentry{solution} to such a unification problem $\C{U}$ is a $Q$-substitution $\varphi$
such that for every $i$, $r_i \varphi = s_i \varphi$ holds (i.e.\ $r_i \varphi$ and
$s_i \varphi$ have the same normal form).  We sometimes write $C$ as $\vec{r} = \vec{s}$, and
(for obvious reasons) call it a list of unification pairs.

We now define the unification algorithm.  It takes a unification problem $\C{U} = QC$ and
produces a not necessarily well-founded tree (called \indexentry{matching tree} by Huet
\cite{Huet75}) with nodes labelled by unification problems and edges labelled by
substitutions.

\begin{definition*}[Unification algorithm]
We distinguish cases according to the form of the unification problem, and either give the
transition done by the algorithm, or else state that it fails.

\emph{Case} identity, i.e.\ $Q.r=r \land C$.  Then
\[
  (Q.r=r \land C) \unifalg{\eps} QC.
\]
\emph{Case} $\xi$, i.e.\ $Q.\lambda \vec{x}\,r = \lambda \vec{x}\,s \land C$.  We may assume
here that the bound variables $\vec{x}$ are the same on both sides.
\[
  (Q.\lambda \vec{x}\,r = \lambda \vec{x}\,s \land C) \unifalg{\eps}
  Q\forall \vec{x}.r = s \land C.
\]
\emph{Case} rigid-rigid, i.e.\ $Q.f \vec{r} = g \vec{s} \land C$ with both $f$ and $g$ rigid,
that is, each either a signature variable or else a forbidden variable.  If $f$ is different
from $g$ then fail.  If $f$ equals $g$,
\[
  (Q.f \vec{r} = f \vec{s} \land C) \unifalg{\eps} Q. \vec{r} = \vec{s} \land C.
\]
\emph{Case} flex-rigid, i.e.\ $Q.u \vec{r} = f \vec{s} \land C$ with $f$ rigid.  Then the
algorithm branches into one \indexentry{imitation} branch and $m$ \indexentry{projection}
branches, where $\vec{r} = r_1, \dots, r_m$.  Imitation replaces the flexible head $u$, using
the substitution $\rho = \subst{}{u}{\lambda \vec{x}.f (h_1 \vec{x}) \dots (h_n \vec{x})}$
with new variables $\vec{h}$ and $\vec{x}$.  For $r_i$ we have a projection if and only if the
final value type of $r_i$ is the (ground) type of $f \vec{s}$.  Then the $i$-th projection
pulls $r_i$ to the front, by
$\rho = \subst{}{u}{\lambda \vec{x}.x_i (h_1 \vec{x}) \dots (h_{n_i} \vec{x})}$.  In each of
these branches we have
\[
  (Q.u \vec{r} = f \vec{s} \land C) \unifalg{\rho} Q'.(u \vec{r} = f \vec{s} \land C) \rho,
\]
where $Q'$ is obtained from $Q$ by removing $\exists u$ and adding $\exists \vec{h}$.

\emph{Case} flex-flex, i.e.\ $Q.u \vec{r} = v \vec{s} \land C$.  If there is a first
flex-rigid or rigid-flex equation in $C$, pull this equation (possibly swapped) to the front
and apply case flex-rigid.  Otherwise, i.e.\ if all equations are between terms with flexible
heads, pick a new variable $z$ of ground type and let $\rho$ be the substitution mapping each
of these flexible heads $u$ to $\lambda \vec{x} z$.
\[
  (Q.u \vec{r} = v \vec{s} \land C) \unifalg{\rho} Q. \emptyset.
\]
This concludes the definition of the unification algorithm.
\end{definition*}

Clearly $\rho$ is defined on flexible variables of $Q$ only, and its value terms have no free
occurrences of forbidden variables from $Q$.  Our next task is to prove correctness and
completeness of this algorithm.

\begin{theorem}[Huet] \mylabel{T:Huet}
Let a unification problem $\C{U}$ consisting of a $\forall \exists \forall$-prefix $Q$ and a
list $\vec{r} = \vec{s}$ of unification pairs be given.
Then either \begin{itemize} \item the unification algorithm can make a transition, and \begin{itemize} \item (correctness) for every transition $\C{U} \unifalg{\rho} \C{U}'$ and $\C{U}'$-solution $\varphi'$ the substitution $(\rho \circ \varphi') {\restriction} Q_\exists$ is a $\C{U}$-solution, and \item (completeness) for every $\C{U}$-solution $\varphi$ there is a transition $\C{U} \unifalg{\rho} \C{U}'$ and $\C{U}'$-solution $\varphi'$ such that $\varphi = (\rho \circ \varphi') {\restriction} Q_\exists$, and moreover $\mu(\varphi') \le \mu(\varphi)$ with $<$ in case flex-rigid, or else \end{itemize} \item the unification algorithm fails, and there is no $\C{U}$-solution, or else \item the unification algorithm succeeds, and $\vec{r} = \vec{s}$ is empty. \end{itemize} Here $\mu(\varphi)$ denotes the number of applications in the value terms of $\varphi$. \end{theorem} \begin{proof} \emph{Case} identity, i.e.\ $Q.r=r \land C \unifalg{\eps} QC$. Then correctness and completeness are obvious. \emph{Case} $\xi$, i.e.\ $Q.\lambda \vec{x}\,r = \lambda \vec{x}\,s \land C \unifalg{\eps} Q \forall \vec{x}.r=s \land C$. Again correctness and completeness are obvious. \emph{Case} rigid-rigid, i.e.\ $Q.f \vec{r} = g \vec{s} \land C \unifalg{\eps} Q. \vec{r} = \vec{s} \land C$. If $f \ne g$, then the unification algorithm fails and there is no $\C{U}$-solution. If $f = g$, again correctness and completeness are obvious. \emph{Case} flex-rigid, i.e.\ $\C{U}$ is $Q.u \vec{r} = f \vec{s} \land C$. Correctness. Assume $\C{U} \unifalg{\rho} \C{U'}$, which is to say $(Q.u \vec{r} = f \vec{s} \land C) \unifalg{\rho} Q'.(u \vec{r} = f \vec{s} \land C) \rho$. Let $\varphi'$ be a $\C{U'}$-solution, i.e.\ $(u \vec{r} = f \vec{s} \land C)\rho \varphi'$. Then clearly $(\rho \circ \varphi') {\restriction} Q_\exists$ is a $\C{U}$-solution. Completeness. Assume $\varphi$ is a $\C{U}$-solution, i.e.\ $(u \vec{r} = f \vec{s} \land C)\varphi$. We have to find a transition $\C{U} \unifalg{\rho} \C{U}'$ and a $\C{U}'$-solution $\varphi'$ such that $\varphi = (\rho \circ \varphi') {\restriction} Q_\exists$, and moreover $\mu(\varphi') < \mu(\varphi)$. Now $u \varphi$ must be of the form $\lambda \vec{x}.a \vec{t}$ with $a$ either $f$ or $x_i$. \emph{Subcase} $a$ is $f$. Then take the imitation branch, i.e.\ replace the flexible head $u$ using the substitution $\rho = \subst{}{u}{\lambda \vec{x}.f (h_1 \vec{x}) \dots (h_n \vec{x})}$ with new variables $\vec{h}$ and $\vec{x}$. Recall $(Q.u \vec{r} = f \vec{s} \land C) \unifalg{\rho} Q'.(u \vec{r} = f \vec{s} \land C) \rho$, where $Q'$ is obtained from $Q$ by removing $\exists u$ and adding $\exists \vec{h}$. Define $\varphi'$ on the new variables $\vec{h}$ by $h_j \varphi' := \lambda \vec{x} t_j$, and as $\varphi$ on all other variables. Then $\varphi = (\rho \circ \varphi') {\restriction} Q_\exists$ because of \[ u \rho \varphi' = \lambda \vec{x}.f\bigl( (\vec{h} \varphi') \vec{x} \bigr) = \lambda \vec{x}.f \vec{t} = u \varphi, \] and our assumption says that \[ f \subst{\vec{t}}{\vec{x}}{\vec{r} \varphi} = f (\vec{s} \varphi) \] Now $\varphi'$ is a $\C{U'}$-solution because of \[ (u \vec{r}) \rho \varphi' = f\bigl( \subst{\vec{t}}{\vec{x}}{\vec{r} \rho \varphi'} \bigr) = f\bigl( \subst{\vec{t}}{\vec{x}}{\vec{r} \varphi} \bigr) = f (\vec{s} \varphi) = f (\vec{s} \rho \varphi'). \] \emph{Subcase} $a$ is $x_i$. 
Then take the $i$-th projection branch, i.e.\ replace the flexible head $u$ using the
substitution $\rho = \subst{}{u}{\lambda \vec{x}.x_i (h_1 \vec{x}) \dots (h_{n_i} \vec{x})}$
with new variables $\vec{h}$ and $\vec{x}$.  Recall
$(Q.u \vec{r} = f \vec{s} \land C) \unifalg{\rho} Q'.(u \vec{r} = f \vec{s} \land C) \rho$,
where $Q'$ is obtained from $Q$ by removing $\exists u$ and adding $\exists \vec{h}$.  Define
$\varphi'$ on the new variables $\vec{h}$ by $h_j \varphi' := \lambda \vec{x} t_j$, and as
$\varphi$ on all other variables.  Then $\varphi = (\rho \circ \varphi') {\restriction}
Q_\exists$ because of
\[
  u \rho \varphi' = \lambda \vec{x}.x_i\bigl( (\vec{h} \varphi') \vec{x} \bigr)
  = \lambda \vec{x}.x_i \vec{t} = u \varphi,
\]
and our assumption says that
\[
  (r_i \varphi)\subst{\vec{t}}{\vec{x}}{\vec{r} \varphi} = f (\vec{s} \varphi)
\]
Now $\varphi'$ is a $\C{U'}$-solution because of
\[
  (u \vec{r}) \rho \varphi'
  = (r_i \rho \varphi') \bigl( \subst{\vec{t}}{\vec{x}}{\vec{r} \rho \varphi'} \bigr)
  = (r_i \varphi) \bigl( \subst{\vec{t}}{\vec{x}}{\vec{r} \varphi} \bigr)
  = f (\vec{s} \varphi) = f (\vec{s} \rho \varphi').
\]
\emph{Case} flex-flex, i.e.\ $Q.C$ where all equations in $C$ are between terms with flexible
heads.  Then for a new variable $z$ of ground type we have taken $\rho$ to be the substitution
mapping each of these flexible heads to $\lambda \vec{x} z$, and
\[
  (QC) \unifalg{\rho} Q. \emptyset.
\]
Correctness.  $(\rho \circ \varphi') {\restriction} Q_\exists$ clearly is a $\C{U}$-solution
for every $\varphi'$.

Completeness.  To simplify the notation let $C = (u r = v s)$.  Assume that $\varphi$ is a
$\C{U}$-solution, i.e.\ $(u r = v s)\varphi$.  Then $u\varphi = \lambda x.f_1 t_1$ and
$v\varphi = \lambda x.f_2 t_2$, and by assumption
$f_1 \subst{t_1}{x}{r} = f_2 \subst{t_2}{x}{s}$.  Define $\varphi'$ to be $\varphi$ with the
assignments to $u,v$ removed and $z \mapsto f_1 \subst{t_1}{x}{r}$
($=f_2 \subst{t_2}{x}{s}$) added.  Then clearly
$(\rho \circ \varphi') {\restriction} Q_\exists = \varphi$, and
$\mu(\varphi') \le \mu(\varphi)$.
\end{proof}

\begin{corollary} \mylabel{C:unif}
Given a unification problem $\C{U} = QC$, and a success node in the matching tree, labelled
with a prefix $Q'$ (i.e.\ a unification problem $\C{U}'$ with no unification pairs).  Then by
composing the substitution labels on the branch leading to this node we obtain a pair
$(Q', \rho)$ with a \inquotes{transition} substitution $\rho$ and such that for any
$Q'$-substitution $\varphi'$, $(\rho \circ \varphi') {\restriction} Q_\exists$ is an
$\C{U}$-solution.  Moreover, every $\C{U}$-solution can be obtained in this way, for an
appropriate success node.  Since the empty substitution is a $Q'$-substitution,
$\rho {\restriction} Q_\exists$ is a $\C{U}$-solution, which is most general in the sense
stated. \fini
\end{corollary}

\section{The Pattern Unification Algorithm}
\mylabel{S:PatternUnif}

We now restrict the notion of $Q$-term as follows.
\emph{$Q$-terms}\index{Q-term@$Q$-term} are inductively defined by the following clauses.
\begin{itemize}
\item If $u$ is a universally quantified variable in $Q$ or a constant, and $\vec{r}$ are
  $Q$-terms, then $u \vec{r}$ is a $Q$-term.
\item For any flexible variable $y$ and distinct forbidden variables $\vec{z}$ from $Q$,
  $y \vec{z}$ is a $Q$-term. %$
\item If $r$ is a $Q \forall z$-term, then $\lambda z r$ is a $Q$-term.
\end{itemize}
Explicitly, $r$ is a $Q$-term iff all its free variables are in $Q$, and for every subterm
$y \vec{r}$ of $r$ with $y$ free in $r$ and flexible in $Q$, the $\vec{r}$ are distinct
variables either $\lambda$-bound in $r$ (such that $y \vec{r}$ is in the scope of this
$\lambda$) or else forbidden in $Q$.

\emph{$Q$-goals}\index{Q-goal@$Q$-goal} and \emph{$Q$-clauses}\index{Q-clause@$Q$-clause} are
simultaneously defined by
\begin{itemize}
\item If $\vec{r}$ are $Q$-terms, then $P \vec{r}$ is a $Q$-goal as well as a $Q$-clause.
\item If $D$ is a $Q$-clause and $G$ is a $Q$-goal, then $D \to G$ is a $Q$-goal.
\item If $G$ is a $Q$-goal and $D$ is a $Q$-clause, then $G \to D$ is a $Q$-clause.
\item If $G$ is a $Q\forall x$-goal, then $\forall x G$ is a $Q$-goal.
\item If $\subst{D}{y}{Y \vec{z}}$ is a $\forall \vec{x} \exists \vec{y}, Y \forall \vec{z}$-clause,
  then $\forall y D$ is a $\forall \vec{x} \exists \vec{y} \forall \vec{z}$-clause.
\end{itemize}
Explicitly, a formula $A$ is a \emph{$Q$-goal}\index{Q-goal@$Q$-goal} iff all its free
variables are in $Q$, and for every subterm $y \vec{r}$ of $A$ with $y$ either existentially
bound in $A$ (with $y \vec{r}$ in the scope) or else free in $A$ and flexible in $Q$, the
$\vec{r}$ are distinct variables either $\lambda$- or universally bound in $A$ (such that
$y \vec{r}$ is in the scope) or else free in $A$ and forbidden in $Q$.  A
\emph{$Q$-substitution}\index{Q-substitution@$Q$-substitution} is a substitution of $Q$-terms.

A \indexentry{pattern unification problem} $\C{U}$ consists of a
$\forall \exists \forall$-prefix $Q$ and a conjunction $C$ of equations between $Q$-terms of
the same type, i.e.\ $\bigland_{i=1}^n r_i = s_i$.  We may assume that each such equation is
of the form $\lambda \vec{x} r = \lambda \vec{x} s$ with the same $\vec{x}$ (which may be
empty) and $r, s$ of ground type.  A \indexentry{solution} to such a unification problem
$\C{U}$ is a $Q$-substitution $\varphi$ such that for every $i$, $r_i \varphi = s_i \varphi$
holds (i.e.\ $r_i \varphi$ and $s_i \varphi$ have the same normal form).  We sometimes write
$C$ as $\vec{r} = \vec{s}$, and (for obvious reasons) call it a list of unification pairs.

We now define the pattern unification algorithm.  It takes a unification problem
$\C{U} = QC$ and returns a substitution $\rho$ and another unification problem
$\C{U}' = Q'C'$.  Note that $\rho$ will be neither a $Q$-substitution nor a
$Q'$-substitution, but will have the property that
\begin{itemize}
\item $\rho$ is defined on flexible variables of $Q$ only, and its value terms have no free
  occurrences of forbidden variables from $Q$,
\item if $G$ is a $Q$-goal, then $G \rho$ is a $Q'$-goal, and
\item whenever $\varphi'$ is a $\C{U}'$-solution, then
  $(\rho \circ \varphi') {\restriction} Q_\exists$ is a $\C{U}$-solution.
\end{itemize}

\begin{definition*}[Pattern Unification Algorithm]
We distinguish cases according to the form of the unification problem, and either give the
transition done by the algorithm, or else state that it fails.

\emph{Case} identity, i.e.\ $Q.r=r \land C$.  Then
\[
  (Q.r=r \land C) \unifalg{\eps} QC.
\]
\emph{Case} $\xi$, i.e.\ $Q.\lambda \vec{x}\,r = \lambda \vec{x}\,s \land C$.  We may assume
here that the bound variables $\vec{x}$ are the same on both sides.
\[
  (Q.\lambda \vec{x}\,r = \lambda \vec{x}\,s \land C) \unifalg{\eps}
  Q\forall \vec{x}.r = s \land C.
\]
\emph{Case} rigid-rigid, i.e.\ $Q.f \vec{r} = f \vec{s} \land C$ with $f$ either a signature
variable or else a forbidden variable.
\[ (Q.f \vec{r} = f \vec{s} \land C) \unifalg{\eps} Q. \vec{r} = \vec{s} \land C. \] \emph{Case} flex-flex with equal heads, i.e.\ $Q.u \vec{y} = u \vec{z} \land C$. \[ (Q.u \vec{y} = u \vec{z} \land C) \unifalg{\rho} Q'.C \rho \] with $\rho = \subst{}{u}{\lambda \vec{y}.u' \vec{w}}$, $Q'$ is $Q$ with $\exists u$ replaced by $\exists u'$, and $\vec{w}$ an enumeration of those $y_i$ which are identical to $z_i$ (i.e.\ the variable at the same position in $\vec{z}$). Notice that $\lambda \vec{y}.u' \vec{w} = \lambda \vec{z}.u' \vec{w}$. \emph{Case} flex-flex with different heads, i.e.\ $Q.u \vec{y} = v \vec{z} \land C$. \[ (Q.u \vec{y} = v \vec{z} \land C) \unifalg{\rho} Q' C \rho, \] where $\rho$ and $Q'$ are defined as follows. Let $\vec{w}$ be an enumeration of the variables both in $\vec{y}$ and in $\vec{z}$. Then $\rho = \subst{}{u,v}{\lambda \vec{y}.u' \vec{w}, \lambda \vec{z}.u' \vec{w}}$, and $Q'$ is $Q$ with $\exists u, \exists v$ removed and $\exists u'$ inserted. \emph{Case} flex-rigid, i.e.\ $Q.u \vec{y} = t \land C$ with $t$ rigid, i.e.\ not of the form $v \vec{z}$ with flexible $v$. \emph{Subcase} occurrence check: $t$ contains (a critical subterm with head) $u$. Fail. \emph{Subcase} pruning: $t$ contains a subterm $v \vec{w}_1 z \vec{w}_2$ with $\exists v$ in $Q$, and $z$ free in $t$ but not in $\vec{y}$. \[ (Q.u \vec{y} = t \land C) \unifalg{\rho} Q'.u \vec{y} = t \rho \land C \rho \] where $\rho = \subst{}{v}{\lambda \vec{w}_1 \lambda z \lambda \vec{w}_2.v' \vec{w}_1 \vec{w}_2}$, $Q'$ is $Q$ with $\exists v$ replaced by $\exists v'$. \emph{Subcase} pruning impossible: $\lambda \vec{y} t$ (after all pruning steps are done still) has a free occurrence of a forbidden variable $z$. Fail. \emph{Subcase} explicit definition: otherwise. \[ (Q.u \vec{y} = t \land C) \unifalg{\rho} Q' C \rho \] where $\rho = \subst{}{u}{\lambda \vec{y} t}$, and $Q'$ is obtained from $Q$ by removing $\exists u$. This concludes the definition of the pattern unification algorithm. \end{definition*} Our next task is to prove that this algorithm indeed has the three properties stated above. The first one ($\rho$ is defined on flexible variables of $Q$ only, and its value terms have no free occurrences of forbidden variables from $Q$) is obvious from the definition. We now prove the second one; the third one will be proved next. \begin{lemma} \mylabel{L:qprime} If $Q \unifalg{\rho} Q'$ and $G$ is a $Q$-goal, then $G \rho$ is a $Q'$-goal. \end{lemma} \begin{proof} We distinguish cases according to the definition of the unification algorithm. All cases are straightforward: \emph{Cases} identity, $\xi$ and rigid-rigid. Then $\rho = \eps$ and the claim is trivial. \emph{Case} flex-flex with equal heads. Then $\rho = \subst{}{u}{\lambda \vec{y}.u' \vec{w}}$ with $\vec{w}$ a sublist of $\vec{y}$, and $Q'$ is $Q$ with $\exists u$ replaced by $\exists u'$. Then clearly $\subst{G}{u}{\lambda \vec{y}.u' \vec{w}}$ is a $Q'$-goal (recall that after a substitution we always normalize). \emph{Case} flex-flex with different heads. Then $\rho = \subst{}{u,v}{\lambda \vec{y}.u' \vec{w}, \lambda \vec{z}.u' \vec{w}}$ with $\vec{w}$ an enumeration of the variables both in $\vec{y}$ and in $\vec{z}$, and $Q'$ is $Q$ with $\exists u, \exists v$ removed and $\exists u'$ inserted. Again clearly $\subst{G}{u,v}{\lambda \vec{y}.u' \vec{w}, \lambda \vec{z}.u' \vec{w}}$ is a $Q'$-goal. 
\emph{Case} flex-rigid, \emph{Subcase} pruning: Then
$\rho = \subst{}{v}{\lambda \vec{w}_1, z, \vec{w}_2.v' \vec{w}_1 \vec{w}_2}$, and $Q'$ is $Q$
with $\exists v$ replaced by $\exists v'$.  Suppose $G$ is a $Q$-goal.  Then clearly
$\subst{G}{v}{\lambda \vec{w}_1, z, \vec{w}_2.v' \vec{w}_1 \vec{w}_2}$ is a $Q'$-goal.

\emph{Case} flex-rigid, \emph{Subcase} explicit definition: Then
$\rho = \subst{}{u}{\lambda \vec{y} t}$ with a $Q$-term $\lambda \vec{y} t$ without free
occurrences of forbidden variables, and $Q'$ is obtained from $Q$ by removing $\exists u$.
Suppose $G$ is a $Q$-goal.  Then clearly $\subst{G}{u}{\lambda \vec{y} t}$ (in normal form) is
a $Q'$-goal.
\end{proof}

Let $Q \unifprefixalg{\rho} Q'$ mean that for some $C, C'$ we have $QC \unifalg{\rho} Q'C'$.
Write $Q \unifprefixalg{\rho}^* Q'$ if there are $\rho_1, \dots, \rho_n$ and
$Q_1, \dots, Q_{n-1}$ such that
\[
  Q \unifprefixalg{\rho_1} Q_1 \unifprefixalg{\rho_2} \dots \unifprefixalg{\rho_{n-1}}
  Q_{n-1} \unifprefixalg{\rho_n} Q',
\]
and $\rho = \rho_1 \circ \dots \circ \rho_n$.

\begin{corollary} \mylabel{C:qprimeit}
If $Q \unifprefixalg{\rho}^* Q'$ and $G$ is a $Q$-goal, then $G \rho$ is a $Q'$-goal. \fini
\end{corollary}

\begin{lemma} \mylabel{L:onestepunif}
Let a unification problem $\C{U}$ consisting of a $\forall \exists \forall$-prefix $Q$ and a
list $\vec{r} = \vec{s}$ of unification pairs be given.  Then either
\begin{itemize}
\item the unification algorithm makes a transition $\C{U} \unifalg{\rho} \C{U}'$, and
  \begin{align*}
    \Phi' \colon &\C{U}'\hbox{-solutions} \to \C{U}\hbox{-solutions}\\
    &\varphi' \mapsto (\rho \circ \varphi') {\restriction} Q_\exists
  \end{align*}
  is well-defined and we have
  $\Phi \colon \C{U}\hbox{-solutions} \to \C{U}'\hbox{-solutions}$ such that $\Phi'$ is
  inverse to $\Phi$, i.e.\ $\Phi'(\Phi \varphi) = \varphi$, or else
\item the unification algorithm fails, and there is no $\C{U}$-solution.
\end{itemize}
\end{lemma}

\begin{proof}
\emph{Case} identity, i.e.\ $Q.r=r \land C \unifalg{\eps} QC$.  Let $\Phi$ be the identity.

\emph{Case} $\xi$, i.e.\
$Q.\lambda \vec{x}\,r = \lambda \vec{x}\,s \land C \unifalg{\eps} Q \forall \vec{x}.r=s \land C$.
Let again $\Phi$ be the identity.

\emph{Case} rigid-rigid, i.e.\
$Q.f \vec{r} = f \vec{s} \land C \unifalg{\eps} Q. \vec{r} = \vec{s} \land C$.  Let again
$\Phi$ be the identity.

\emph{Case} flex-flex with equal heads, i.e.\
$Q.u \vec{y} = u \vec{z} \land C \unifalg{\rho} Q'.C \rho$ with
$\rho = \subst{}{u}{\lambda \vec{y}.u' \vec{w}}$, $Q'$ is $Q$ with $\exists u$ replaced by
$\exists u'$, and $\vec{w}$ an enumeration of those $y_i$ which are identical to $z_i$
(i.e.\ the variable at the same position in $\vec{z}$).  Notice that
$\lambda \vec{y}.u' \vec{w} = \lambda \vec{z}.u' \vec{w}$.

1.  $\Phi'$ is well-defined: Let $\varphi'$ be a $\C{U}'$-solution, i.e.\ assume that
$C \rho \varphi'$ holds.  We must show that
$\varphi := (\rho \circ \varphi') {\restriction} Q_\exists$ is a $\C{U}$-solution.

For $u \vec{y} = u \vec{z}$: We need to show $(u \varphi)\vec{y} = (u \varphi)\vec{z}$.  But
$u \varphi = u \rho \varphi' = (\lambda \vec{y}.u' \vec{w})\varphi'$.  Hence
$(u \varphi)\vec{y} = (u \varphi)\vec{z}$ by the construction of $\vec{w}$.

For $(r = s) \in C$: We need to show $(r = s)\varphi$.  But by assumption
$(r = s)\rho \varphi'$ holds, and $r = s$ has all its flexible variables from $Q_\exists$.

2.  Definition of $\Phi\colon \C{U}\hbox{-solutions} \to \C{U}'\hbox{-solutions}$.  Let a
$Q$-substitution $\varphi$ be given such that $(u \vec{y} = u \vec{z})\varphi$ and
$C \varphi$.
Define $u'(\Phi \varphi) := \lambda \vec{w}.(u \varphi)\vec{w} \vec{0}$ (w.l.o.g), and $v(\Phi \varphi) := v$ for every other variable $v$ in $Q_\exists$. $\Phi \varphi =: \varphi'$ is a $\C{U}'$-solution: Let $(r = s) \in C$. Then $(r = s)\varphi$ by assumption, for $\varphi$ is a $Q$-substitution such that $C \varphi$ holds. We must show \[ (r = s)\rho \varphi'. \] Notice that our assumption $(u \varphi) \vec{y} = (u \varphi) \vec{z}$ implies that the normal form of both sides can only contain the variables in $\vec{w}$. Therefore \begin{alignat*}{2} u \rho \varphi' &= (\lambda \vec{y}.u' \vec{w})\varphi' \\ &= \lambda \vec{y}.(\lambda \vec{w}.(u \varphi)\vec{w} \vec{0})\vec{w}\\ &= \lambda \vec{y}.(u \varphi)\vec{w} \vec{0}\\ &= \lambda \vec{y}.(u \varphi)\vec{y} \\ &= u \varphi \end{alignat*} and hence $(r = s)\rho \varphi'$. 3. $\Phi' (\Phi \varphi) = \varphi$: So let $\varphi$ be an $\C{U}$-solution, and $\varphi' := \Phi \varphi$. Then \begin{alignat*}{2} u \bigl(\Phi'\varphi' \bigr) &= u \bigl((\rho \circ \varphi') {\restriction} Q_\exists \bigr)\\ &= u \rho \varphi'\\ &= u\varphi, &\quad& \hbox{as proved in 2.} \end{alignat*} For every other variable $v$ in $Q_\exists$ we obtain \begin{align*} v \bigl(\Phi'\varphi' \bigr) &= v \bigl((\rho \circ \varphi') {\restriction} Q_\exists \bigr)\\ &= v \rho \varphi'\\ &= v \varphi'\\ &= v \varphi. \end{align*} \emph{Case} flex-flex with different heads, i.e.\ $\C{U}$ is $Q. u \vec{y} = v \vec{z} \land C$. Let $\vec{w}$ be an enumeration of the variables both in $\vec{y}$ and in $\vec{z}$. Then $\rho = \subst{}{u,v}{\lambda \vec{y}.u' \vec{w}, \lambda \vec{z}.u' \vec{w}}$, $Q'$ is $Q$ with $\exists u, \exists v$ removed and $\exists u'$ inserted, and $\C{U}' = Q' C \rho$. 1. $\Phi'$ is well-defined: Let $\varphi'$ be a $\C{U}'$-solution, i.e.\ assume that $C \rho \varphi'$ holds. We must show that $\varphi := (\rho \circ \varphi') {\restriction} Q_\exists$ is a $\C{U}$-solution. For $u \vec{y} = v \vec{z}$: We need to show $(u \varphi)\vec{y} = (v \varphi)\vec{z}$. But $(u \varphi)\vec{y} = (u \rho \varphi')\vec{y} = (\lambda \vec{y}.(u' \varphi')\vec{w})\vec{y} = (u' \varphi')\vec{w}$, and similarly $(v \varphi)\vec{z} = (u' \varphi')\vec{w}$. For $(r = s) \in C$: We need to show $(r = s)\varphi$. But since $u'$ is a new variable, $\varphi$ and $\rho \circ \varphi'$ coincide on all variables free in $r = s$, and we have $(r = s)\rho \varphi'$ by assumption. 2. Definition of $\Phi\colon \C{U}\hbox{-solutions} \to \C{U}'\hbox{-solutions}$. Let a $Q$-substitution $\varphi$ be given such that $(u \vec{y} = v \vec{z})\varphi$ and $C \varphi$. Define \begin{alignat*}{2} u'(\Phi \varphi) &:= \lambda \vec{w}.(u \varphi)\vec{w} \vec{0} &\quad& \hbox{w.l.o.g.; $\vec{0}$ arbitrary}\\ v'(\Phi \varphi) &:= \lambda \vec{w}.(v \varphi)\vec{0} \vec{w}\\ w(\Phi \varphi) &:= w \varphi && \hbox{otherwise, i.e.\ $w \ne u', v', u$ flexible.} \end{alignat*} Since by assumption $(u \varphi)\vec{y} = (v \varphi)\vec{z}$, the normal forms of both $(u \varphi)\vec{y}$ and $(v \varphi)\vec{z}$ can only contain the common variables $\vec{w}$ from $\vec{y}, \vec{z}$ free. Hence, for $\varphi' := \Phi \varphi$, $u \rho \varphi' = u \varphi$ by the argument in the previous case, and similarly $v \rho \varphi' = v \varphi$. Since $r \varphi = s \varphi$ ($(r = s) \in C$ arbitrary) by assumption, and $\rho$ only affects $u$ and $v$, we obtain $r \rho \varphi' = s \rho \varphi'$, as required. $\Phi' (\Phi \varphi) = \varphi$ can now be proved as in the previous case. 
\emph{Case} flex-rigid, $\C{U}$ is $Q.u \vec{y} = t \land C$. \emph{Subcase} occurrence check: $t$ contains (a critical subterm with head) $u$. Then clearly there is no $Q$-substitution $\varphi$ such that $(u \varphi)\vec{y} = t \varphi$. \emph{Subcase} pruning: Here $t$ contains a subterm $v \vec{w}_1 z \vec{w}_2$ with $\exists v$ in $Q$, and $z$ free in $t$. Then $\rho = \subst{}{v}{\lambda \vec{w}_1, z, \vec{w}_2.v' \vec{w}_1 \vec{w}_2}$, $Q'$ is $Q$ with $\exists v$ replaced by $\exists v'$, and $\C{U}' = Q'.u \vec{y} = t \rho \land C \rho$. 1. $\Phi'$ is well-defined: Let $\varphi'$ be a $\C{U}'$-solution, i.e.\ $(u \varphi')\vec{y} = t \rho \varphi'$, and $r \rho \varphi' = s \rho \varphi'$ for $(r = s) \in C$. We must show that $\varphi := (\rho \circ \varphi') {\restriction} Q_\exists$ is a $\C{U}$-solution. For $u \vec{y} = t$: We need to show $(u \varphi)\vec{y} = t \rho \varphi'$. But \begin{alignat*}{2} (u \varphi)\vec{y} &= (u \rho \varphi')\vec{y}\\ &= (u \varphi')\vec{y} &\quad& \hbox{since $\rho$ does not touch $u$}\\ &= t \rho \varphi' &&\hbox{by assumption.} \end{alignat*} For $(r = s) \in C$: We need to show $(r = s)\varphi$. But since $v'$ is a new variable, $\varphi = (\rho \circ \varphi') {\restriction} Q_\exists$ and $\rho \circ \varphi'$ coincide on all variables free in $r = s$, and the claim follows from $(r = s)\rho \varphi'$. 2. Definition of $\Phi\colon \C{U}\hbox{-solutions} \to \C{U}'\hbox{-solutions}$. For a $\C{U}$-solution $\varphi$ define \begin{alignat*}{2} v'(\Phi \varphi) &:= \lambda \vec{w}_1, \vec{w}_2.(v \varphi)\vec{w}_1 0 \vec{w}_2\\ w(\Phi \varphi) &:= w \varphi &\quad& \hbox{otherwise, i.e.\ $w \ne v', v$ flexible.} \end{alignat*} Since by assumption $(u \varphi)\vec{y} = t \varphi$, the normal form of $t \varphi$ cannot contain $z$ free. Therefore, for $\varphi' := \Phi \varphi$, \begin{align*} v \rho \varphi' &= (\lambda \vec{w}_1, z, \vec{w}_2.v' \vec{w}_1 \vec{w}_2)\varphi'\\ &= \lambda \vec{w}_1, z, \vec{w}_2. (\lambda \vec{w}_1, \vec{w}_2.(v \varphi)\vec{w}_1 0 \vec{w}_2) \vec{w}_1 \vec{w}_2\\ &= \lambda \vec{w}_1, z, \vec{w}_2.(v \varphi)\vec{w}_1 0 \vec{w}_2\\ &= \lambda \vec{w}_1, z, \vec{w}_2.(v \varphi)\vec{w}_1 z \vec{w}_2\\ &= v \varphi. \end{align*} Hence $\varphi' = \Phi \varphi$ satisfies $(u \varphi')\vec{y} = t \rho \varphi'$. For $r = s$ this follows by the same argument. $\Phi' (\Phi \varphi) = \varphi$ can again be proved as in the previous case. \emph{Subcase} pruning impossible: Then $\lambda \vec{y} t$ has an occurrence of a universally quantified (i.e.\ forbidden) variable $z$. Therefore clearly there is no $Q$-substitution $\varphi$ such that $(u \varphi)\vec{y} = t \varphi$. \emph{Subcase} explicit definition. Then $\rho = \subst{}{u}{\lambda \vec{y} t}$, $Q'$ is obtained from $Q$ by removing $\exists u$, and $\C{U}' = Q'C \rho$. Note that $\rho$ is a $Q'$-substitution, for we have performed the pruning steps. 1. $\Phi'$ is well-defined: Let $\varphi'$ be a $\C{U}'$-solution, i.e.\ $r \rho \varphi' = s \rho \varphi'$ for $(r = s) \in C$. We must show that $\varphi := (\rho \circ \varphi') {\restriction} Q_\exists$ is an $\C{U}$-solution. For $u \vec{y} = t$: We need to show $(u \rho \varphi')\vec{y} = t \rho \varphi'$. But \begin{alignat*}{2} (u \rho \varphi')\vec{y} &= ((\lambda \vec{y} t) \varphi')\vec{y}\\ &= t \varphi' \\ &= t \rho \varphi' &\quad& \hbox{since $u$ does not appear in $t$.} \end{alignat*} For $(r = s) \in C$: We need to show $(r = s)\varphi$. But this clearly follows from $(r = s)\rho \varphi'$. 2. 
Definition of $\Phi\colon \C{U}\hbox{-solutions} \to \C{U}'\hbox{-solutions}$, and proof of
$\Phi'(\Phi \varphi) = \varphi$.  For a $\C{U}$-solution $\varphi$ define
$\Phi \varphi = \varphi {\restriction} Q_\exists$.  Then
\[
  u \rho \varphi' = \lambda \vec{y} t \varphi' = \lambda \vec{y} t \varphi = u \varphi,
\]
and clearly $v \rho \varphi' = v \varphi$ for all other flexible $v$.  For $(r = s) \in C$,
from $r \varphi = s \varphi$ we easily obtain $r \varphi' = s \varphi'$.
\end{proof}

It is not hard to see that the unification algorithm terminates, by defining a measure that
decreases with each transition.

\begin{corollary} \mylabel{C:patternunif}
Given a unification problem $\C{U} = QC$, the unification algorithm either fails, and there is
no $\C{U}$-solution, or else returns a pair $(Q', \rho)$ with a \inquotes{transition}
substitution $\rho$ and a prefix $Q'$ (i.e.\ a unification problem $\C{U}'$ with no
unification pairs) such that for any $Q'$-substitution $\varphi'$,
$(\rho \circ \varphi') {\restriction} Q_\exists$ is an $\C{U}$-solution, and every
$\C{U}$-solution can be obtained in this way.  Since the empty substitution is a
$Q'$-substitution, $\rho {\restriction} Q_\exists$ is a $\C{U}$-solution, which is most
general in the sense stated. \fini
\end{corollary}

\section{Proof Search}
\mylabel{S:Search}

A \emph{$Q$-sequent}\index{Q-sequent@$Q$-sequent} has the form $\C{P} \seqarrow G$, where
$\C{P}$ is a list of $Q$-clauses and $G$ is a $Q$-goal.  We write $M[\C{P}]$ to indicate that
all assumption variables in the derivation $M$ are assumptions of clauses in $\C{P}$.  Write
$\vdash^n S$ for a set $S$ of sequents if there are derivations $M_i^{G_i}[\C{P}_i]$ in long
normal form for all $(\C{P}_i \seqarrow G_i) \in S$ such that $\sum \dep{M_i} \le n$.
\begin{verbatim}
G 1 1 -> (all n,k,l.G n k -> G(n+1)l -> G(n+2)(k+l)) -> all n ex k,l. G n k & G(n+1)l"))
(assume "Init-Zero" "Init-One" "Step")
(ind)
; Base
(ex-intro (pt "0"))
(ex-intro (pt "1"))
(prop)
; Step
(assume "n" "IH")
(by-assume-with "IH" "k" "IH-k")
(by-assume-with "IH-k" "l" "IH-l")
(ex-intro (pt "l"))
(ex-intro (pt "k+l"))
(search)
\end{verbatim}
The extracted program can now be obtained as follows.
\begin{verbatim}
(define Fib-neterm
  (nt (proof-to-extracted-term (theorem-name-to-proof "Fib"))))
\end{verbatim}
The extracted term is obtained by
\begin{verbatim}
(term-to-string Fib-neterm)
\end{verbatim}
which yields
\begin{verbatim}
(Rec nat=>nat@@nat)(0@1)([n1,p2]right p2@left p2+right p2)
\end{verbatim}
This clearly is a linear algorithm.  To run it, type
\begin{verbatim}
(pp (nt (make-term-in-app-form Fib-neterm (pt "13"))))
\end{verbatim}
which yields
\begin{verbatim}
233@377
\end{verbatim}
We can also use an \inquotes{external} extraction, yielding Scheme code:
\begin{verbatim}
(term-to-expr Fib-neterm)
\end{verbatim}
produces
\begin{verbatim}
((natrec (cons 0 1))
 (lambda (n1)
   (lambda (p2) (cons (cdr p2) (+ (car p2) (cdr p2))))))
\end{verbatim}
To run this code, we need to give a Scheme definition of \texttt{natrec}, that is, recursion
on the natural numbers:
\begin{verbatim}
(define (natrec init)
  (lambda (step)
    (lambda (n)
      (if (= 0 n)
          init
          ((step n) (((natrec init) step) (- n 1)))))))
\end{verbatim}
This again is a linear algorithm.  We will see later that a classical proof yields a linear
algorithm as well, which however uses functions instead of pairs.

\begin{remark*}
There are other algorithms to compute the Fibonacci numbers $\alpha_n$, which run in
logarithmic time.
By definition we have
\[
\begin{pmatrix}0 & 1 \\ 1 & 1 \end{pmatrix}
\begin{pmatrix} \alpha_{n-2} \\ \alpha_{n-1} \end{pmatrix}
=\begin{pmatrix} \alpha_{n-1} \\ \alpha_{n-2}+\alpha_{n-1} \end{pmatrix}
=\begin{pmatrix} \alpha_{n-1} \\ \alpha_n \end{pmatrix},
\]
hence with $A = \bigl(\begin{smallmatrix} 0 & 1 \\ 1 & 1 \end{smallmatrix} \bigr) \in \D{R}^{2 \times 2}$
\[
A^n \begin{pmatrix}0\\ 1 \end{pmatrix} =
\begin{pmatrix} \alpha_n \\ \alpha_{n+1} \end{pmatrix}.
\]
So an easy way to compute the Fibonacci numbers is by computing the powers of $A$. This can be done in time $O(\log(n))$, since
\begin{align*}
A^{2n} &= (A^n)^2, \\
A^{2n+1} &= A^{2n} \cdot A.
\end{align*}
It is possible to obtain this algorithm as computational content of a proof: use $u,v$ to denote vectors in $\D{Z}^2$ and $X,Y$ to denote matrices in $\D{Z}^{2 \times 2}$. Let $G(n,u)$ mean that $u$ is the vector of the $n$-th and $(n+1)$-th Fibonacci numbers. $G$ can be axiomatized by
\[
G(1,\begin{pmatrix} 1 \\ 1 \end{pmatrix}),\quad
G(n,\begin{pmatrix} \alpha \\ \beta \end{pmatrix}) \to
G(n+1,\begin{pmatrix} \beta \\ \alpha+\beta \end{pmatrix}).
\]
Then prove by induction on the positive (binary) numbers $n$
\[
\forall n \ex X \forall m,u. G(m,u) \to G(m+n,Xu).
\]
Clearly this $X$ must be $A^n$.

Using some linear algebra, one can even give an explicit formula for $\alpha_n$. To this end we first diagonalize the matrix $A$. The general recipe runs as follows. Form $B := A^t$, hence $B=A$ in our case. The eigenvalues can be computed as the zeros of the characteristic polynomial $p_A= |A-tE| = -t(1-t)-1= t^2-t-1$. So the eigenvalues are $\lambda_{1,2} = \frac{1 \pm\sqrt{5}}{2}$. We now compute eigenvectors for these eigenvalues. For $\lambda_1=\frac{1+\sqrt{5}}{2}$, let $x=\bigl(\begin{smallmatrix} \xi_1 \\ \xi_2 \end{smallmatrix} \bigr)$ and solve the linear equation system
\[
\begin{pmatrix} -\lambda_1 & 1 \\ 1 & 1-\lambda_1 \\ \end{pmatrix}
\begin{pmatrix} \xi_1 \\ \xi_2 \end{pmatrix} = \begin{pmatrix}0\\ 0\end{pmatrix}.
\]
So $-\lambda_1 \xi_1+\xi_2=0$. For $\xi_1=1$ we obtain $\xi_2=\lambda_1$, so $x_1=\bigl(\begin{smallmatrix} 1 \\ \lambda_1 \end{smallmatrix} \bigr)$ is an eigenvector. For $\lambda_2 = \frac{1-\sqrt{5}}{2}$ we similarly obtain $x_2=\bigl(\begin{smallmatrix} 1 \\ \lambda_2 \end{smallmatrix} \bigr)$ as an eigenvector. For the dimensions of the eigenspaces and the multiplicities of the eigenvalues we obtain
\begin{align*}
&\dim(\Eig(f_B,\lambda_1)) = 1 = \mu(p_B,\lambda_1) \\
&\dim(\Eig(f_B,\lambda_2)) = 1 = \mu(p_B,\lambda_2),
\end{align*}
so the matrix $B$ is diagonalizable, and $\bigl(\begin{smallmatrix} 1 \\ \lambda_1 \end{smallmatrix} \bigr), \bigl(\begin{smallmatrix} 1 \\ \lambda_2 \end{smallmatrix} \bigr)$ is a basis of $\D{R}^2$ consisting of eigenvectors of $f_B$. Let
\[
T := \begin{pmatrix}1 & 1 \\ \lambda_1 & \lambda_2 \end{pmatrix}, \qquad
S := T^t = \begin{pmatrix}1 & \lambda_1 \\ 1 & \lambda_2 \end{pmatrix}.
\]
Then the general theory yields
\[
SAS^{-1} = D := \begin{pmatrix} \lambda_1 & 0\\ 0 &\lambda_2 \end{pmatrix}, \qquad
S^{-1} = \frac{1}{\lambda_2 - \lambda_1}
\begin{pmatrix} \lambda_2 & -\lambda_1 \\ -1 & 1 \end{pmatrix}.
\]
We can now give an explicit formula for $A^n$ and hence also for the Fibonacci numbers.
\begin{alignat*}{2}
A^n &= (S^{-1}DS)^n \\
&= \underbrace{(S^{-1}DS)\cdot\ldots\cdot(S^{-1}DS)}_{n\ \mathrm{times}} \\
&= S^{-1}D^nS \\
&= S^{-1} \begin{pmatrix} \lambda_1^n & 0\\0 & \lambda_2^n \end{pmatrix}S \\
&= S^{-1} \begin{pmatrix} \lambda_1^n & 0\\0 & \lambda_2^n \end{pmatrix}
\begin{pmatrix}1 & \lambda_1 \\ 1 & \lambda_2 \end{pmatrix} \\
&= S^{-1} \begin{pmatrix} \lambda_1^n & \lambda_1^{n+1} \\ \lambda_2^n & \lambda_2^{n+1} \end{pmatrix} \\
&= \frac{1}{\lambda_2-\lambda_1}
\begin{pmatrix} \lambda_2 & -\lambda_1 \\ -1 & 1 \end{pmatrix}
\begin{pmatrix} \lambda_1^n & \lambda_1^{n+1} \\ \lambda_2^n & \lambda_2^{n+1} \end{pmatrix} \\
&= -\frac{1}{\sqrt{5}}
\begin{pmatrix}
\lambda_1^n \lambda_2 - \lambda_2^n \lambda_1 &
\lambda_2 \lambda_1^{n+1} - \lambda_1 \lambda_2^{n+1}\\
-\lambda_1^n+\lambda_2^n & -\lambda_1^{n+1}+\lambda_2^{n+1} \\
\end{pmatrix} \\
&= -\frac{1}{\sqrt{5}}
\begin{pmatrix}
-\lambda_1^{n-1} + \lambda_2^{n-1} & -\lambda_1^n + \lambda_2^n \\
-\lambda_1^n+\lambda_2^n & -\lambda_1^{n+1}+\lambda_2^{n+1} \\
\end{pmatrix}
&\quad&\hbox{since $\lambda_1 \lambda_2=-1$} \\
&= \frac{1}{\sqrt{5}}
\begin{pmatrix}
\beta_{n-1} & \beta_n \\
\beta_n & \beta_{n+1} \\
\end{pmatrix}
&&\hbox{with $\beta_n:=\lambda_1^n-\lambda_2^n$.}
\end{alignat*}
Hence by the above
\[
\begin{pmatrix} \alpha_n \\ \alpha_{n+1} \end{pmatrix}=
\frac{1}{\sqrt{5}}
\begin{pmatrix}
\beta_{n-1} & \beta_n \\
\beta_n & \beta_{n+1} \\
\end{pmatrix}
\begin{pmatrix} 0\\1 \end{pmatrix}.
\]
In particular we then have for $\alpha_n$
\[
\alpha_n = \frac{1}{\sqrt{5}} \beta_n=
\frac{1}{\sqrt{5}}
\left[\left(\frac{1+\sqrt{5}}{2} \right)^n
-\left(\frac{1-\sqrt{5}}{2} \right)^n \right].
\]
\end{remark*}

Other examples for program extraction from constructive proofs are abundant in the literature. Major case studies done in our group include the development of the Warshall algorithm\index{Warshall algorithm} in \cite{BergerSchwichtenbergSeisenberger01}, and of the Dijkstra algorithm\index{Dijkstra algorithm} in \cite{BenlSchwichtenberg99}.

\subsection{The Warshall Algorithm}

Our language consists of the following relation and function symbols. We deal with a binary relation $R$ on $\{0,1,\dots,n{-}1 \}$, whose transitive closure is to be determined.
\begin{alignat*}{2}
&\elem{k}{x} &\quad&\hbox{$k$ occurs in the path $x$,} \\
&\Rf{x} && \hbox{$x$ is a repetition free path,} \\
&P_i(x,j,k) &&\hbox{$x$ is an $R$-path from $j$ to $k$ whose inner elements are ${<}i$.}
\end{alignat*}
\begin{verbatim}
nat=>boole nat=>nat=>nat=>list nat)r
([j,k][if (j=k) ([x6,x7]x6) ([x6,x7]x7)]j:
 ([if (r j k) ([x6,x7]x6) ([x6,x7]x7)](j::k:)(Nil nat)))
([i,f,j,k]
 [if (f j k=(Nil nat)) ([x8,x9]x8) ([x8,x9]x9)]
 ([if (f j i=(Nil nat)) ([x8,x9]x8) ([x8,x9]x9)]
  (Nil nat)
  ([if (f i k=(Nil nat)) ([x8,x9]x8) ([x8,x9]x9)]
   (Nil nat)(f j i|f i k)))
 (f j k))
\end{verbatim}
To make this program more readable, we give the (primitive) recursion equations for the defined function $f$. For given $i,j,k$ they either yield a path from $j$ to $k$ with inner elements ${<}i$, or else the empty list \texttt{(Nil nat)}.
\begin{verbatim}
(nat=>nat=>nat)=>nat)
([f1]f1 0 1)
([n1,H2,f3] H2([n4,n5]f3 n5(n4+n5)))
\end{verbatim}
It is rather obvious that this can be translated into the \textsc{Scheme} program above.

\subsection{Wellfoundedness of $\D{N}$}
\mylabel{SS:Wf}

An interesting phenomenon can occur when we extract a program from a classical proof which uses the minimum principle. Consider as a simple example the wellfoundedness\index{wellfoundedness} of $<$ on $\D{N}$.
\section{Types}
\mylabel{S:Types}

Types of formulas will be necessary for normalization by evaluation of proof terms. The type \texttt{nulltype} will be useful when assigning to a formula the type of a program to be extracted from a proof of this formula. Types not involving the types \texttt{atomic}, \texttt{existential}, \texttt{prop} and \texttt{nulltype} are called object types.
%
\subsection{Type variables and constants}

Type variable\index{type variable} names are $\texttt{alpha}, \texttt{beta} \dots$; $\texttt{alpha}$ is provided by default. To have infinitely many type variables available, we allow appended indices: $\texttt{alpha}1, \texttt{alpha}2, \texttt{alpha}3 \dots$ will be type variables. The only type constants\index{type constant} are $\texttt{atomic}, \texttt{existential}, \texttt{prop}$ and $\texttt{nulltype}$.

\subsection{Generalities for substitutions, type substitutions}
\mylabel{SS:GenSubst}

Generally, a substitution is a list $((x_1\ t_1) \dots (x_n\ t_n))$ of lists of length two, with distinct variables $x_i$ and such that for each $i$, $x_i$ is different from $t_i$. It is understood as simultaneous substitution. The default equality is \texttt{equal?}; however, in the versions ending with \texttt{-wrt} (for \inquotes{with respect to}) one can provide special notions of equality. To construct substitutions we have
\begin{alignat*}{2}
&\texttt{(make-substitution \textsl{args} \textsl{vals})}%
\index{make-substitution@\texttt{make-substitution}} \\
&\texttt{(make-substitution-wrt \textsl{arg-val-equal?}\ \textsl{args} \textsl{vals})}%
\index{make-substitution-wrt@\texttt{make-substitution-wrt}} \\
&\texttt{(make-subst \textsl{arg} \textsl{val})}%
\index{make-subst@\texttt{make-subst}} \\
&\texttt{(make-subst-wrt \textsl{arg-val-equal?}\ \textsl{arg} \textsl{val})}%
\index{make-subst-wrt@\texttt{make-subst-wrt}} \\
&\texttt{empty-subst}\index{empty-subst@\texttt{empty-subst}}
\end{alignat*}
Accessing a substitution is done via the usual access operations for association lists: \texttt{assoc} and \texttt{assoc-wrt}. We also provide
\begin{alignat*}{2}
&\texttt{(restrict-substitution-wrt \textsl{subst} \textsl{test?})}%
\index{restrict-substitution-wrt@\texttt{restrict-substitution-wrt}} \\
&\texttt{(restrict-substitution-to-args \textsl{subst} \textsl{args})}%
\index{restrict-substitution-to-args@\texttt{restrict-substitution-to-args}} \\
&\texttt{(substitution-equal?\ \textsl{subst1} \textsl{subst2})}%
\index{substitution-equal?@\texttt{substitution-equal?}} \\
&\texttt{(substitution-equal-wrt?\ \textsl{arg-equal?}\ \textsl{val-equal?}\ \textsl{subst1} \textsl{subst2})}%
\index{substitution-equal-wrt?@\texttt{substitution-equal-wrt?}} \\
&\texttt{(subst-item-equal-wrt?\ \textsl{arg-equal?}\ \textsl{val-equal?}\ \textsl{item1} \textsl{item2})}%
\index{subst-item-equal-wrt?@\texttt{subst-item-equal-wrt?}} \\
&\texttt{(consistent-substitutions-wrt?} \\
&\qquad\texttt{\textsl{arg-equal?}\ \textsl{val-equal?}\ \textsl{subst1} \textsl{subst2})}%
\index{consistent-substitutions-wrt?@\texttt{consistent-substitutions-wrt?}}
\end{alignat*}
\emph{Composition}\index{composition} $\vartheta \sigma$ of two substitutions
\begin{align*}
\vartheta &= ((x_1\ s_1) \dots (x_m\ s_m)), \\
\sigma &= ((y_1\ t_1) \dots (y_n\ t_n))
\end{align*}
is defined as follows.
In the list $((x_1\ s_1\sigma) \dots (x_m\ s_m\sigma)\ (y_1\ t_1) \dots (y_n\ t_n))$ remove all bindings $(x_i\ s_i\sigma)$ with $s_i\sigma = x_i$, and also all bindings $(y_j\ t_j)$ with $y_j \in \{x_1, \dots, x_m\}$. It is easy to see that composition is associative, with the empty substitution as unit. We provide
\begin{alignat*}{2}
\texttt{(compose-substitutions-wrt}\ &\texttt{\textsl{substitution-proc} \textsl{arg-equal?}} \\
&\texttt{\textsl{arg-val-equal?}\ \textsl{subst1} \textsl{subst2}})%
\index{compose-substitutions-wrt@\texttt{compose-substitutions-wrt}}
\end{alignat*}
We shall have occasion to use these general substitution procedures for the following kinds of substitutions:
\[
\begin{tabular}{|l|l|l|l|}
\hline
for &called &domain equality &arg-val-equality \\
\hline
type variables &\texttt{tsubst}\index{tsubst@\texttt{tsubst}}
&\texttt{equal?} &\texttt{equal?} \\
object variables &\texttt{osubst}\index{osubst@\texttt{osubst}}
&\texttt{equal?} &\texttt{var-term-equal?}\index{var-term-equal?@\texttt{var-term-equal?}} \\
predicate variables &\texttt{psubst}\index{psubst@\texttt{psubst}}
&\texttt{equal?} &\texttt{pvar-cterm-equal?}%
\index{pvar-cterm-equal?@\texttt{pvar-cterm-equal?}} \\
assumption variables &\texttt{asubst}\index{asubst@\texttt{asubst}}
&\texttt{avar=?}\index{avar=?@\texttt{avar=?}} &\texttt{avar-proof-equal?}%
\index{avar-proof-equal?@\texttt{avar-proof-equal?}} \\
\hline
\end{tabular}
\]
The following substitutions will make sense for a
\[
\begin{tabular}{|l|l|}
\hline
type &\texttt{tsubst} \\
term &\texttt{tsubst} and \texttt{osubst} \\
formula &\texttt{tsubst} and \texttt{osubst} and \texttt{psubst} \\
proof &\texttt{tsubst} and \texttt{osubst} and \texttt{psubst} and \texttt{asubst} \\
\hline
\end{tabular}
\]
In particular, for \indexentry{type substitutions} \texttt{tsubst} we have
\begin{alignat*}{2}
&\texttt{(type-substitute \textsl{type} \textsl{tsubst})}%
\index{type-substitute@\texttt{type-substitute}} \\
&\texttt{(type-subst \textsl{type} \textsl{tvar} \textsl{type1})}%
\index{type-subst@\texttt{type-subst}} \\
&\texttt{(compose-t-substitutions \textsl{tsubst1} \textsl{tsubst2})}%
\index{compose-t-substitutions@\texttt{compose-t-substitutions}}
\end{alignat*}
A display function for type substitutions is
\begin{align*}
&\texttt{(display-t-substitution \textsl{tsubst})}%
\index{display-t-substitution@\texttt{display-t-substitution}}
\end{align*}

\subsection{Simultaneous free algebras as base types}

We allow the formation of inductively generated types $\mu \vec{\alpha}\,\vec{\kappa}$, where $\vec{\alpha} = \alpha_1,\dots,\alpha_n$ is a list of distinct type variables, and $\vec{\kappa}$ is a list of \inquotes{constructor types} whose argument types contain $\alpha_1,\dots,\alpha_n$ in strictly positive positions only. For instance, $\mu\alpha(\alpha, \alpha \to \alpha)$ is the type of natural numbers; here the list $(\alpha, \alpha \to \alpha)$ stands for two generation principles: $\alpha$ for \inquotes{there is a natural number} (the number $0$), and $\alpha \to \alpha$ for \inquotes{for every natural number there is another one} (its successor).

Let an infinite supply of \emph{type variables} $\alpha, \beta$ be given. Let $\vec{\alpha} = (\alpha_j)_{j=1,\dots,m}$ be a list of distinct type variables. \emph{Types} $\rho, \sigma, \tau, \mu, \nu \in \Types$ and \emph{constructor types} $\kappa \in \constrtypes(\vec{\alpha})$ are defined inductively as follows.
\begin{align*}
&\frac{\vec{\rho}, \vec{\sigma}_1, \dots, \vec{\sigma}_n \in \Types}
{\vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to
(\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha})}
\quad\hbox{($n \ge 0$)} \\
&\frac{\kappa_1, \dots, \kappa_n \in \constrtypes(\vec{\alpha})}
{(\mu \vec{\alpha}\,(\kappa_1, \dots, \kappa_n))_j \in \Types}
\quad\hbox{($n \ge 1$, $j=1,\dots,m$)}\qquad
\frac{\rho, \sigma \in \Types}{\rho \to \sigma \in \Types}
\end{align*}
Here $\vec{\rho}$ is short for a list $\rho_1,\dots,\rho_k$ ($k\ge 0$) of types and $\vec{\rho} \to \sigma$ means $\rho_1 \to \dots \to \rho_k \to \sigma$, associated to the right. We shall use $\mu, \nu$ for types of the form $(\mu \vec{\alpha}\,(\kappa_1, \dots, \kappa_n))_j$ only, and for types $\vec{\tau}= (\tau_j)_{j=1,\dots,m}$ and a constructor type $\kappa = \vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha})$ let
\[
\kappa[\vec{\tau}] := \vec{\rho} \to (\vec{\sigma}_1 \to \tau_{j_1}) \to \dots \to
(\vec{\sigma}_n \to \tau_{j_n}) \to \tau_j.
\]
\begin{examples*}
\begin{alignat*}{2}
&\unit &&:= \mu \alpha\,\alpha, \\
&\boole &&:= \mu \alpha\,(\alpha,\alpha), \\
&\nat &&:= \mu \alpha\,(\alpha,\alpha \to \alpha), \\
&\ytensor(\alpha_1)(\alpha_2) &&:= \mu \alpha. \alpha_1 \to \alpha_2 \to \alpha, \\
&\ypair(\alpha_1)(\alpha_2) &&:= \mu \alpha. (\unit \to \alpha_1) \to (\unit \to \alpha_2) \to \unit \to \alpha, \\
&\yplus(\alpha_1)(\alpha_2) &&:= \mu \alpha. (\alpha_1 \to \alpha, \alpha_2 \to \alpha), \\
&\lst(\alpha_1) &&:= \mu \alpha\,(\alpha,\alpha_1 \to \alpha \to \alpha), \\
% &\rho \times \sigma &&:= \mu \alpha.\rho \to \sigma \to \alpha,
% \\
% &\rho + \sigma &&:= \mu \alpha\,(\rho \to \alpha, \sigma \to \alpha),
% \\
&(\tree, \tlist) &&:= \mu (\alpha,\beta)\,
(\alpha, \beta \to \alpha, \beta, \alpha \to \beta \to \beta), \\
&\btree &&:= \mu \alpha\,(\alpha, \alpha \to \alpha \to \alpha), \\
&\C{O} &&:= \mu \alpha\, (\alpha, \alpha \to \alpha, (\nat \to \alpha) \to \alpha), \\
&\C{T}_0 &&:= \nat, \\
&\C{T}_{n+1} &&:= \mu \alpha\,(\alpha, (\C{T}_n \to \alpha) \to \alpha).
\end{alignat*}
Note that we could have defined our primitive $\rho \times \sigma$ by $\mu \alpha.\rho \to \sigma \to \alpha$. However, this may lead to complex terms when it comes to extracting programs from proofs. Therefore we stick to using $\rho \times \sigma$ as a primitive.
\end{examples*}
% \subsection{Types of formulas}
%
% We also have ground types \texttt{atomic}, \texttt{existential},
% \texttt{prop} and \texttt{top}; they will be used to assign types to
% formulas. E.g.\ $\forall n\,n=0$ receives the type $\texttt{nat} \to
% \texttt{atomic}$, and $\forall n,m \ex k\,n+m=k$ receives the type
% $\texttt{nat} \to \texttt{nat} \to \texttt{existential}$. (Logical)
% falsity $\bot$\index{falsity!logical} receives the type \texttt{top}.
% The ground type \texttt{prop} is used for predicate variables, e.g.\
% $R$ of arity \texttt{nat,nat -> prop}. Types of formulas will be
% necessary for normalization by evaluation of proof terms. Types not
% involving the ground types \texttt{atomic}, \texttt{existential},
% \texttt{prop} and \texttt{top} are called object types.
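For instance, for the list type from the examples above we have $\lst(\alpha_1) = \mu \alpha\,(\kappa_1, \kappa_2)$ with $\kappa_1 = \alpha$ and $\kappa_2 = \alpha_1 \to \alpha \to \alpha$, and unfolding the definition of $\kappa[\vec{\tau}]$ gives
\[
\kappa_1[\lst(\alpha_1)] = \lst(\alpha_1), \qquad
\kappa_2[\lst(\alpha_1)] = \alpha_1 \to \lst(\alpha_1) \to \lst(\alpha_1),
\]
which are the expected types of the two list constructors, the empty list and the cons operation.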
To add and remove names for type variables, we use \begin{align*} &\texttt{(add-tvar-name \textsl{name1} \dots)} \index{add-tvar-name@\texttt{add-tvar-name}} \\ &\texttt{(remove-tvar-name \textsl{name1} \dots)} \index{remove-tvar-name@\texttt{remove-tvar-name}} \end{align*} We need a constructor, accessors and a test for type variables. \begin{alignat*}{2} &\texttt{(make-tvar \textsl{index} \textsl{name})} &\quad& \text{constructor} \\ &\texttt{(tvar-to-index \textsl{tvar})} && \text{accessor} \index{tvar-to-index@\texttt{tvar-to-index}} \\ &\texttt{(tvar-to-name \textsl{tvar})} && \text{accessor} \index{tvar-to-name@\texttt{tvar-to-name}} \\ &\texttt{(tvar?\ \textsl{x}).} \index{tvar?@\texttt{tvar?}} \end{alignat*} To generate new type variables we use \begin{align*} &\texttt{(new-tvar \textsl{type})} \index{new-tvar@\texttt{new-tvar}} \end{align*} % Ground types are added and removed by % \begin{align*} % % &\texttt{(add-ground-type \textsl{symbol} \textsl{symbol1} \dots)} % \index{add-ground-type@\texttt{add-ground-type}} % \\ % % &\texttt{(remove-ground-type \textsl{type})}. % \index{remove-ground-type@\texttt{remove-ground-type}} % % \end{align*} % Executing \texttt{(add-ground-type \textsl{symbol} \textsl{symbol1} % \dots)} causes \texttt{symbol} to be ad\-ded as name for the newly % created ground type, and the optional symbols \textsl{symbol1} \dots % to be reserved as names for variables of that type. To introduce simultaneous free algebras we use \[ \texttt{add-algebras-with-parameters} \index{add-algebras-with-parameters@\texttt{add-algebras-with-parameters}}, \quad \hbox{abbreviated \texttt{add-param-algs} \index{add-param-algs@\texttt{add-param-algs}}}. \] An example is \begin{verbatim} (add-param-algs (list "labtree" "labtlist") 'alg-typeop 2 '("LabLeaf" "alpha1=>labtree") '("LabBranch" "labtlist=>alpha2=>labtree") '("LabEmpty" "labtlist") '("LabTcons" "labtree=>labtlist=>labtlist" pairscheme-op)) \end{verbatim} This simultaneously introduces the two free algebras \texttt{labtree} and \texttt{labtlist}, both finitary, whose constructors are \texttt{LabLeaf}, \texttt{LabBranch}, \texttt{LabEmpty} and \texttt{LabTcons} (written as an infix pair operator, hence right associative). The constructors are introduced as \inquotes{self-evaluating} constants; they play a special role in our semantics for normalization by evaluation. In case there are no parameters we use \texttt{add-algs}% \index{add-algs@\texttt{add-algs}}, and in case there is no need for a simultaneous definition we use \texttt{add-alg}% \index{add-alg@\texttt{add-alg}} or \texttt{add-param-alg}% \index{add-param-alg@\texttt{add-param-alg}}. 
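For orientation, here is a hypothetical call introducing a single parameter-free algebra of unlabelled binary trees. It is patterned on the \texttt{add-param-algs} example above (constructor name first, then its type); the exact argument conventions of \texttt{add-alg} are an assumption here and should be checked against the actual interface.
\begin{verbatim}
(add-alg "bin"
  '("BinNil" "bin")
  '("BinBranch" "bin=>bin=>bin"))
\end{verbatim}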
For already introduced algebras we need constructors and accessors
\begin{align*}
&\texttt{(make-alg \textsl{name} \textsl{type1} \dots)}
\index{make-alg@\texttt{make-alg}} \\
&\texttt{(alg-form-to-name \textsl{alg})}
\index{alg-form-to-name@\texttt{alg-form-to-name}} \\
&\texttt{(alg-form-to-types \textsl{alg})}
\index{alg-form-to-types@\texttt{alg-form-to-types}} \\
&\texttt{(alg-name-to-simalg-names \textsl{alg-name})}
\index{alg-name-to-simalg-names@\texttt{alg-name-to-simalg-names}} \\
&\texttt{(alg-name-to-token-types \textsl{alg-name})}
\index{alg-name-to-token-types@\texttt{alg-name-to-token-types}} \\
&\texttt{(alg-name-to-typed-constr-names \textsl{alg-name})}
\index{alg-name-to-typed-constr-names@\texttt{alg-name-to-typed-constr-names}} \\
&\texttt{(alg-name-to-tvars \textsl{alg-name})}
\index{alg-name-to-tvars@\texttt{alg-name-to-tvars}} \\
&\texttt{(alg-name-to-arity \textsl{alg-name})}
\index{alg-name-to-arity@\texttt{alg-name-to-arity}}
\end{align*}
We also provide the tests
\begin{alignat*}{2}
&\texttt{(alg-form?\ \textsl{x})} &\quad& \text{incomplete test}
\index{alg-form?@\texttt{alg-form?}} \\
&\texttt{(alg?\ \textsl{x})} && \text{complete test}
\index{alg?@\texttt{alg?}} \\
&\texttt{(finalg?\ \textsl{type})} && \text{incomplete test}
\index{finalg?@\texttt{finalg?}} \\
&\texttt{(ground-type?\ \textsl{x})} && \text{incomplete test}
\index{ground-type?@\texttt{ground-type?}}
\end{alignat*}
We require that there is at least one nullary constructor in every free algebra; hence, it has a \inquotes{canonical inhabitant}. For arbitrary types this need not be the case, but occasionally (e.g.\ for general logical problems, like proving the drinker formula) it is useful. Therefore
\begin{align*}
&\texttt{(make-inhabited \textsl{type} \textsl{term1} \dots)}
\index{make-inhabited@\texttt{make-inhabited}}
\end{align*}
marks the optional term as the canonical inhabitant if it is provided, and otherwise creates a new constant of that type, which is taken to be the canonical inhabitant. We also have
\[
\texttt{(type-to-canonical-inhabitant \textsl{type})},
\]
which returns the canonical inhabitant; it is an error to apply this procedure to a non-inhabited type. We do allow non-inhabited types, in order to be able to implement some aspects of \cite{Hofmann99,AehligSchwichtenberg00}.

To remove names for algebras we use
\begin{align*}
&\texttt{(remove-alg-name \textsl{name1} \dots)}
\index{remove-alg-name@\texttt{remove-alg-name}}
\end{align*}
\textbf{Examples.} Standard examples for finitary free algebras are the type \texttt{nat} of unary natural numbers, and the type \texttt{btree} of binary trees. The domain $\C{I}_{\texttt{nat}}$ of unary natural numbers is defined (as in \cite{BergerEberlSchwichtenberg03}) as a solution to a domain equation. We always provide the finitary free algebra \texttt{unit} consisting of exactly one element, and \texttt{boole} of booleans; objects of the latter type are (cf.\ loc.\ cit.)\ \texttt{true}, \texttt{false} and families of terms of this type, and in addition the bottom object of type \texttt{boole}.
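As a small illustration of these constructors and accessors, the following hypothetical session builds the type \texttt{list nat} (the algebra \texttt{list} with parameter \texttt{nat}, as it appears in the extracted programs above) and takes it apart again; the comments only describe the intended results, not verbatim system output.
\begin{verbatim}
(define list-nat (make-alg "list" (py "nat")))  ; the type  list nat

(alg-form-to-name list-nat)   ; "list"
(alg-form-to-types list-nat)  ; the parameter types, here just  nat
(alg-form? list-nat)          ; true
\end{verbatim}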
Tests:
\begin{align*}
% &\texttt{(alg?\ \textsl{type})%
% \index{alg?@\texttt{alg?}}} \\
%
% &\texttt{(finalg?\ \textsl{type})%
% \index{finalg?@\texttt{finalg?}}}\\
%
% &\texttt{(ground-type?\ \textsl{type})}
% \index{ground-type?@\texttt{ground-type?}}\\
&\texttt{(arrow-form?\ \textsl{type})}
\index{arrow-form?@\texttt{arrow-form?}} \\
&\texttt{(star-form?\ \textsl{type})}
\index{star-form?@\texttt{star-form?}} \\
&\texttt{(object-type?\ \textsl{type})}
\index{object-type?@\texttt{object-type?}}
\end{align*}
We also need constructors and accessors for arrow types
\begin{alignat*}{2}
&\texttt{(make-arrow \textsl{arg-type} \textsl{val-type})}
\index{make-arrow@\texttt{make-arrow}} &\quad& \text{constructor} \\
&\texttt{(arrow-form-to-arg-type \textsl{arrow-type})}
\index{arrow-form-to-arg-type@\texttt{arrow-form-to-arg-type}} && \text{accessor} \\
&\texttt{(arrow-form-to-val-type \textsl{arrow-type})}
\index{arrow-form-to-val-type@\texttt{arrow-form-to-val-type}} && \text{accessor}
\end{alignat*}
and star types
\begin{alignat*}{2}
&\texttt{(make-star \textsl{type1} \textsl{type2})}
\index{make-star@\texttt{make-star}} &\quad& \text{constructor} \\
&\texttt{(star-form-to-left-type \textsl{star-type})}
\index{star-form-to-left-type@\texttt{star-form-to-left-type}} && \text{accessor} \\
&\texttt{(star-form-to-right-type \textsl{star-type})}
\index{star-form-to-right-type@\texttt{star-form-to-right-type}} && \text{accessor.}
\end{alignat*}
For convenience we also have
\begin{alignat*}{2}
&\texttt{(mk-arrow \textsl{type1} \dots\ \textsl{type})}
\index{mk-arrow@\texttt{mk-arrow}} \\
&\texttt{(arrow-form-to-arg-types \textsl{type} <\textsl{n}>)}
\index{arrow-form-to-arg-types@\texttt{arrow-form-to-arg-types}} &\quad& \text{all (first $n$) argument types} \\
&\texttt{(arrow-form-to-final-val-type \textsl{type})}
\index{arrow-form-to-final-val-type@\texttt{arrow-form-to-final-val-type}} && \text{type of final value.}
\end{alignat*}
To check and to display a type we have
\begin{align*}
&\texttt{(type?\ \textsl{x})}
\index{type?@\texttt{type?}} \\
&\texttt{(type-to-string \textsl{type}).}
\index{type-to-string@\texttt{type-to-string}}
\end{align*}
\textbf{Implementation.} Type variables are implemented as lists:
\[
\texttt{(tvar \textsl{index} \textsl{name})}.
\]

\section{Variables}
\mylabel{Variables}

A variable of an object type is interpreted by a continuous functional (object) of that type. We use the word \inquotes{variable} and not \inquotes{program variable}, since continuous functionals are not necessarily computable. For readable in- and output, and also for ease in parsing, we may reserve certain strings as names for variables of a given type, e.g.\ $\texttt{n}, \texttt{m}$ for variables of type \texttt{nat}. Then also $\texttt{n0}, \texttt{n1}, \texttt{n2}, \dots, \texttt{m0}, \dots$ can be used for the same purpose.

In most cases we need to argue about existing (i.e.\ total) objects only. For the notion of totality we have to refer to \cite[Chapter 8.3]{Stoltenberg94}; particularly relevant here is exercise 8.5.7. To make formal arguments with quantifiers relativized to total objects more manageable, we use a special sort of variables intended to range over such objects only. For example, $\texttt{n0}, \texttt{n1}, \texttt{n2}, \dots, \texttt{m0}, \dots$ range over total natural numbers, and $\verb#n^0#, \verb#n^1#, \verb#n^2#, \dots$ are general variables. We say that the \emph{degree of totality}\index{degree of totality} for the former is $1$, and for the latter $0$.
% \subsection*{Interface} % To add and remove names for variables of a given type (e.g.\ $\texttt{n}, \texttt{m}$ for variables of type \texttt{nat}), we use \begin{align*} &\texttt{(add-var-name \textsl{name1} \dots\ \textsl{type})} \index{add-var-name@\texttt{add-var-name}} \\ &\texttt{(remove-var-name \textsl{name1} \dots\ \textsl{type})} \index{remove-var-name@\texttt{remove-var-name}} \\ &\texttt{(default-var-name \textsl{type}).} \index{default-var-name@\texttt{default-var-name}} \end{align*} The first variable name added for any given type becomes the default variable name. If the system creates new variables of this type, they will carry that name. For complex types it sometimes is necessary to talk about variables of a certain type without using a specific name. In this case one can use the empty string to create a so called numerated variable (see below). The parser is able to produce this kind of canonical variables from type expressions. We need a constructor, accessors and tests for variables. \begin{alignat*}{2} &\texttt{(make-var \textsl{type} \textsl{index} \textsl{t-deg} \textsl{name})} &\quad& \text{constructor} \\ &\texttt{(var-to-type \textsl{var})} && \text{accessor} \index{var-to-type@\texttt{var-to-type}} \\ &\texttt{(var-to-index \textsl{var})} && \text{accessor} \index{var-to-index@\texttt{var-to-index}} \\ &\texttt{(var-to-t-deg \textsl{var})} && \text{accessor} \index{var-to-t-deg@\texttt{var-to-t-deg}} \\ &\texttt{(var-to-name \textsl{var})} && \text{accessor} \index{var-to-name@\texttt{var-to-name}} \\ &\texttt{(var-form?\ \textsl{x})} && \text{incomplete test} \index{var-form?@\texttt{var-form?}} \\ &\texttt{(var?\ \textsl{x}).} && \text{complete test} \index{var?@\texttt{var?}} \end{alignat*} It is guaranteed that \texttt{equal?} is a valid test for equality of variables. Moreover, it is guaranteed that parsing a displayed variable reproduces the variable; the converse need not be the case (we may want to convert it into some canonical form). For convenience we have the function \begin{alignat*}{2} &\texttt{(mk-var \textsl{type} <\textsl{index}> <\textsl{t-deg}> <\textsl{name}>).} \index{mk-var@\texttt{mk-var}} \end{alignat*} The type is a required argument; however, the remaining arguments are optional. The default for the name string is the value returned by \begin{alignat*}{2} &\texttt{(default-var-name \textsl{type})} \index{default-var-name@\texttt{default-var-name}} \end{alignat*} If there is no default name, a numerated variable is created. The default for the totality is \inquotes{total}. Using the empty string as the name, we can create so called numerated variables. We further require that we can test whether a given variable belongs to those special ones, and that from every numerated variable we can compute its index: \begin{align*} &\texttt{(numerated-var?\ \textsl{var})} \index{numerated-var?@\texttt{numerated-var}} \\ &\texttt{(numerated-var-to-index \textsl{numerated-var}).} \index{numerated-var-to-index@\texttt{numerated-var-to-index}} \end{align*} It is guaranteed that \texttt{make-var} used with the empty name string is a bijection \[ \Types \times \D{N} \times \TDegs \to \NumVars \] with inverses \texttt{var-to-type}, \texttt{numerated-var-to-index} and \texttt{var-to-t-deg}. 
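The following hypothetical session illustrates how these operations fit together, assuming \texttt{n} has been added as a variable name for the type \texttt{nat} and that totality degree $1$ stands for \inquotes{total}; the comments describe the expected outcomes rather than actual system output.
\begin{verbatim}
(define n-var (mk-var (py "nat")))   ; total variable with the default name
(var-to-name n-var)                  ; "n"
(var-to-type n-var)                  ; the type  nat

(define v17 (make-var (py "nat") 17 1 ""))  ; a numerated variable
(numerated-var? v17)                 ; true
(numerated-var-to-index v17)         ; 17
\end{verbatim}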
% \footnote{Here equality is to be understood as equality for the
% respective \inquotes{types}, e.g.\ the first equation is to be understood as
%
% {\tt
% (equal-vars?\
% (type-and-index-to-var
% (var-to-type numerated-var)
% (numerated-var-to-index numerated-var))
% numerated-var)
% }
%
% is a truth value for every scheme object {\tt numerated-var} such that
% {\tt (numerated-var?\ numerated-var)} is a truth value.
% }%
% :
%
% \begin{verbatim}
% (type-and-index-to-var
% (var-to-type numerated-var)
% (numerated-var-to-index numerated-var)) = numerated-var
% (var-to-type (type-and-index-to-var type index)) = type
% (numerated-var-to-index
% (type-and-index-to-var type index)) = index
% (numerated-var?\ (type-and-index-to-var type index)) = }t
% \end{verbatim}
Although these functions look like an ad hoc extension of the interface that is convenient for normalization by evaluation, there is also a deeper background: these functions can be seen as the \inquotes{computational content} of the well-known phrase \inquotes{we assume that there are infinitely many variables of every type}. Giving a constructive proof for this statement would require giving infinitely many examples of variables for every type. This of course can only be done by specifying a function (for every type) that enumerates these examples. To make the specification finite we require the examples to be given in a uniform way, i.e.\ by a function of two arguments. To make sure that all these examples are in fact different, we would have to require \texttt{make-var} to be injective. Instead, we require (classically equivalently) that \texttt{make-var} be a bijection on its image, as again this can be turned into a computational statement by requiring that a witness (i.e.\ an inverse function) is given.

Finally, since the exact knowledge of infinitely many variables of every type is often not needed, we require that, either by using the above functions or by some other form of definition, functions
\begin{align*}
&\texttt{(type-to-new-var \textsl{type})}
\index{type-to-new-var@\texttt{type-to-new-var}} \\
&\texttt{(type-to-new-partial-var \textsl{type})}
\index{type-to-new-partial-var@\texttt{type-to-new-partial-var}}
\end{align*}
are defined that return a (total or partial) variable of the requested type, different from all variables that have ever been returned by any of the specified functions so far.

Occasionally we may want to create a new variable with the same name (and degree of totality) as a given one. This is useful e.g.\ for bound renaming. Therefore we supply
\begin{align*}
&\texttt{(var-to-new-var \textsl{var}).}
\index{var-to-new-var@\texttt{var-to-new-var}}
\end{align*}
\textbf{Implementation.} Variables are implemented as lists:
\[
\texttt{(var \textsl{type} \textsl{index} \textsl{t-deg} \textsl{name})}.
\]

\section{Constants}
\mylabel{Pconst}

Every constant (or more precisely, object constant) has a type and denotes a computable (hence continuous) functional of that type. We have the following three kinds of constants:
\begin{itemize}
\item constructors, kind \texttt{constr},
\item constants with user defined rules (also called program(mable) constant, or pconst), kind \texttt{pconst},
\item constants whose rules are fixed, kind \texttt{fixed-rules}.
\end{itemize}
The latter are built into the system: recursion operators for arbitrary algebras, equality and existence operators for finitary algebras, and existence elimination.
They are typed in parametrized form, with the actual type (or formula) given by a type (or type and formula) substitution that is also part of the constant. For instance, equality is typed by $\alpha \to \alpha \to \boole$ and a type substitution $\alpha \mapsto \rho$. This is done for clarity (and brevity, e.g.\ for large $\rho$ in the example above), since one should think of the type of a constant in this way. For constructors and for constants with fixed rules, for efficiency reasons we want to keep the object denoted by the constant (as needed for normalization by evaluation) as part of it. It depends on the type of the constant, hence must be updated in a given proof whenever the type changes by a type substitution.

\subsection{Rewrite and computation rules for program constants}
\mylabel{SS:RewCompRules}

For every program constant $c^\rho$ we assume that some rewrite rules of the form $c\vec{K} \cnv N$ are given, where $\FV(N) \subseteq \FV(\vec{K})$ and $c\vec{K}$, $N$ have the same type (not necessarily a ground type). Moreover, for any two rules $c\vec{K} \cnv N$ and $c\vec{K}' \cnv N'$ we require that $\vec{K}$ and $\vec{K}'$ are of the same length, called the \emph{arity}\index{arity!of a program constant} of $c$. The rules are divided into \emph{computation rules}\index{computation rule} and proper \emph{rewrite rules}\index{rewrite rule}. They must satisfy the requirements listed in \cite{BergerEberlSchwichtenberg03}. The idea is that a computation rule can be understood as a description of a computation in a suitable \emph{semantical} model, provided the syntactic constructors correspond to semantic ones in the model, whereas the other rules describe \emph{syntactic} transformations.

There a more general approach was used: one may enter into components of products. Then instead of one arity one needs several \inquotes{type informations} $\vec{\rho} \to \sigma$ with $\vec{\rho}$ a list of types, $0$'s and $1$'s indicating the left or right part of a product type. For example, if $c$ is of type $\tau \to (\tau \to \tau \to \tau) \times (\tau \to \tau)$, then the rules $cy0xx \cnv a$ and $cy1 \cnv b$ are admitted, and $c$ comes with the type informations $(\tau,0,\tau,\tau \to \tau) \to \tau$ and $(\tau,1) \to (\tau \to \tau)$. However, for simplicity we only deal with a single arity here.

Given a set of rewrite rules, we want to treat some rules, which we call \indexentry{computation rules}, in a different, more efficient way. In order to define what we mean by computation rules, we need the notion of a \indexentry{constructor pattern}. These are special terms defined inductively as follows.
\begin{itemize}
\item Every variable is a constructor pattern.
\item If $c$ is a constructor and $P_1,\dots,P_n$ are constructor patterns (or projection markers 0 or 1), such that $c \vec{P}$ is of ground type, then $c\vec{P}$ is a constructor pattern.
\end{itemize}
From the given set of rewrite rules we choose a subset $\Comp$ with the following properties.
\begin{itemize}
\item If $c\vec{P} \cnv Q \in \Comp$, then $P_1,\dots,P_n$ are constructor patterns or projection markers.
\item The rules are left-linear, i.e.\ if $c\vec{P} \cnv Q \in \Comp$, then every variable in $c\vec{P}$ occurs only once in $c\vec{P}$.
\item The rules are non-overlapping, i.e.~for different rules $c\vec{K}\cnv M$ and $c\vec{L}\cnv N$ in $\Comp$ the left hand sides $c\vec{K}$ and $c\vec{L}$ are non-unifiable. \end{itemize} We write $c\vec{M} \cnv_{\comp} Q$ to indicate that the rule is in $\Comp$. All other rules will be called (proper) rewrite rules, written $c\vec{M} \cnv_{\rew} K$. In our reduction strategy computation rules will always be applied first, and since they are non-overlapping, this part of the reduction is unique. However, since we allowed almost arbitrary rewrite rules, it may happen that in case no computation rule applies a term may be rewritten by different rules $\notin \Comp$. In order to obtain a deterministic procedure we then select the first applicable rewrite rule (This is a slight simplification of \cite{BergerEberlSchwichtenberg03}, where special functions $\select_c$ were used for this purpose). \subsection{Recursion over simultaneous free algebras} \mylabel{SS:RecSFA} We now explain what we mean by recursion\index{recursion} over simultaneous free algebras. The inductive structure of the types $\vec{\mu} = \mu\vec{\alpha}\,\vec{\kappa}$ corresponds to two sorts of constants. With the \emph{constructors} $\constr_i^{\vec{\mu}} \colon \kappa_i[\vec{\mu}]$ we can construct elements of a type $\mu_j$, and with the \emph{recursion operators}\index{recursion operator} $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$ we can construct mappings from $\mu_j$ to $\tau_j$ by recursion on the structure of $\vec{\mu}$. So in \texttt{(Rec arrow-types)}, \texttt{arrow-types} is a list $\mu_1 \to \tau_1, \dots, \mu_k \to \tau_k$. Here $\mu_1, \dots, \mu_k$ are the algebras defined simultaneously and $\tau_1, \dots, \tau_k$ are the result types. For convenience in our later treatment of proofs (when we want to normalize a proof by (1) translating it into a term, (2) normalizing this term and (3) translating the normal term back into a proof), we also allow all-formulas $\forall x_1^{\mu_1} A_1, \dots, \forall x_k^{\mu_k} A_k$ instead of \texttt{arrow-types}: they are treated as $\mu_1 \to \tau(A_1)$, \dots, $\mu_k \to \tau(A_k)$ with $\tau(A_j)$ the type of $A_j$. Recall the definition of types and constructor types in Section~\ref{S:Types}, and the examples given there. In order to define the type of the recursion operators w.r.t.\ $\vec{\mu} = \mu\vec{\alpha}\, \vec{\kappa}$ and result types $\vec{\tau}$, we first define for \[ \kappa_i = \vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha}) \] the \emph{step type} \begin{align*} \ST_i^{\vec{\mu}, \vec{\tau}} := \vec{\rho} \to &(\vec{\sigma}_1 \to \mu_{j_1}) \to \dots \to (\vec{\sigma}_n \to \mu_{j_n}) \to \\ &(\vec{\sigma}_1 \to \tau_{j_1}) \to \dots \to (\vec{\sigma}_n \to \tau_{j_n}) \to \tau_j. \end{align*} Here $\vec{\rho}, (\vec{\sigma}_1 \to \mu_{j_1}), \dots, (\vec{\sigma}_n \to \mu_{j_n})$ correspond to the \emph{components} % (or \emph{parameters}) of the object of type $\mu_j$ under consideration, and $(\vec{\sigma}_1 \to \tau_{j_1}), \dots, (\vec{\sigma}_n \to \tau_{j_n})$ to the previously defined values. The recursion operator $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$ has type \[ \rec_{\mu_j}^{\vec{\mu}, \vec{\tau}} \colon \ST_1^{\vec{\mu}, \vec{\tau}} \to \dots \to \ST_k^{\vec{\mu}, \vec{\tau}} \to \mu_j \to \tau_j. 
\] We will often write $\rec_j^{\vec{\mu}, \vec{\tau}}$ for $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$, and omit the upper indices $\vec{\mu}, \vec{\tau}$ when they are clear from the context. In case of a non-simultaneous free algebra, i.e.\ of type $\mu \alpha\,(\kappa)$, for $\rec_\mu^{\mu, \tau}$ we normally write $\rec_\mu^\tau$. A simple example for simultaneous free algebras is \[ (\tree, \tlist) := \mu (\alpha,\beta)\, (\alpha, \beta \to \alpha, \beta, \alpha \to \beta \to \beta). \] The constructors are \begin{align*} &\leaf^{\tree} := \constr_1^{(\tree, \tlist)}, \\ &\branch^{\tlist \to \tree} := \constr_2^{(\tree, \tlist)}, \\ &\empt^{\tlist} := \constr_3^{(\tree, \tlist)}, \\ &\tcons^{\tree \to \tlist \to \tlist} := \constr_4^{(\tree, \tlist)}. \end{align*} An example for a recursion constant is \begin{alignat*}{2} &\texttt{(const Rec $\delta_1 \to \delta_2 \to \delta_3 \to \delta_4 \to \tree \to \alpha_1$} \\ &\qquad \qquad \qquad \texttt{$(\alpha_1 \mapsto \tau_1, \alpha_2 \mapsto \tau_2)$)} \index{Rec@\texttt{Rec}} \\ \intertext{with} &\delta_1 := \alpha_1, \\ &\delta_2 := \tlist \to \alpha_2 \to \alpha_1, \\ &\delta_3 := \alpha_2, \\ &\delta_4 := \tree \to \tlist \to \alpha_1 \to \alpha_2 \to \alpha_2. \end{alignat*} Here the fact that we deal with a simultaneous recursion (over \texttt{tree} and \texttt{tlist}), and that we define a constant of type $\tree \to \dots$, can all be inferred from what is given: the type $\tree \to \dots$ is right there, and for \texttt{tlist} we can look up the simultaneously defined algebras. For the external representation (i.e.\ display) we use the shorter notation \[ \texttt{(Rec $\tree \to \tau_1$ $\tlist \to \tau_2$)}. \] % $$\texttt{(Rec $\tree \to \alpha_1$ $\tlist \to \alpha_2$ $(\alpha_1 % \mapsto \tau_1, \alpha_2 \mapsto \tau_2)$)}.$$ % A simplified version (without the recursive calls) of the recursion % operator is the following generalized if-then-else operator. % \begin{alignat*}{2} % % &\texttt{(const If $\alpha_1 \to \alpha_1 \to \tree \to \alpha_1$ % $(\alpha_1 \mapsto \tau_1)$).} % \index{If@\texttt{If}} % % \end{alignat*} % A shorter notation would be $\texttt{(if-at $\tree \to \tau_1$)}$, but % again we prefer the more systematic one above. As already mentioned, it is also possible that the object constant \texttt{Rec} comes with formulas instead of types, as the assumption constant \texttt{Ind} below. This is due to our desire not to duplicate code when normalizing proofs, but rather translate the proof into a term first, normalize the term and then translate back into a proof. For the last step we must have the original formulas of the induction operator available. To see a concrete example, let us recursively define addition $+ \colon \tree \to \tree \to \tree$ and $\oplus \colon \tlist \to \tree \to \tlist$. The recursion equations to be satisfied are \begin{alignat*}{2} &+\,\leaf &&= \lambda a a, \\ &+(\branch\,\bs) &&= \lambda a.\branch(\oplus\,\bs\,a),\\[6pt] &\oplus\,\empt &&= \lambda a\,\empt, \\ &\oplus(\tcons\,b\,\bs) &&= \lambda a.\tcons(+\,b\,a)(\oplus\,\bs\,a). \end{alignat*} We define $+$ and $\oplus$ by means of the recursion operators $\rec_{\tree}$ and $\rec_{\tlist}$ with result types \begin{align*} \tau_1 &:= \tree \to \tree, \\ \tau_2 &:= \tree \to \tlist. \end{align*} The step terms are \begin{align*} M_1 &:= \lambda a a, \\ M_2 &:= \lambda \bs \lambda g^{\tau_2} \lambda a.\branch(g\,a), \\ M_3 &:= \lambda a\,\empt, \\ M_4 &:= \lambda b \lambda \bs \lambda f^{\tau_1} \lambda g^{\tau_2} \lambda a. 
\tcons(f\,a)(g\,a). \end{align*} Then \begin{align*} + &:= \rec_{\tree} \vec{M} \colon \tree \to \tree \to \tree, \\ \oplus &:= \rec_{\tlist} \vec{M} \colon \tlist \to \tree \to \tlist. \end{align*} To explain the \emph{conversion relation}\index{conversion relation}, it will be useful to employ the following notation. Let $\vec{\mu} = \mu \vec{\alpha}\,\vec{\kappa}$, \[ \kappa_i = \rho_1 \to \dots \to \rho_m \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha}), \] and consider $\constr_i^{\vec{\mu}} \vec{N}$. Then we write $\vec{N}^P = N_1^P, \dots, N_m^P$ for the \emph{parameter arguments} $N_1^{\rho_1}, \dots, N_m^{\rho_m}$ and $\vec{N}^R = N_1^R, \dots, N_n^R$ for the \emph{recursive arguments} $N_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}}, \dots, N_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}$, and $n^R$ for the number $n$ of recursive arguments. We define a \emph{conversion relation} $\cnv_\rho$ between terms of type $\rho$ by \begin{align} (\lambda xM)N &\cnv \subst{M}{x}{N}\label{betaconv} \\ \lambda x.Mx &\cnv M\quad\hbox{if $x \notin \FV(M)$, $M$ not an abstraction}\label{etaconv} \\ (\rec_j^{\vec{\mu}, \vec{\tau}} \vec{M})^{\mu_j \to \tau_j} (\constr_i^{\vec{\mu}} \vec{N}) &\cnv M_i \vec{N} \bigl( (\rec_{j_1}^{\vec{\mu}, \vec{\tau}} \vec{M}) \circ N_1^R\bigr) \dots \bigl( (\rec_{j_n}^{\vec{\mu}, \vec{\tau}} \vec{M}) \circ N_n^R\bigr) \label{recconv} \end{align} Here we have written $\rec_j^{\vec{\mu}, \vec{\tau}}$ for $\rec_{\mu_j}^{\vec{\mu}, \vec{\tau}}$, and $\circ$ means composition. \subsection{Internal representation of constants} Every object constant has the internal representation \begin{align*} \texttt{(const\ } &\hbox{ \textsl{object-or-arity} \textsl{name} \textsl{uninst-type-or-formula} \textsl{subst}} \\ &\hbox{\textsl{t-deg} \textsl{token-type} \textsl{arrow-types-or-repro-formulas}\texttt{)},} \end{align*} where \textsl{subst} may have type, object and assumption variables in its domain. The type of the constant is the result of carrying out this substitution in \textsl{uninst-type-or-formula} (if this is a type; otherwise first substitute and then convert the formula into a type); free type variables may again occur in this type. Note that a formula will occur if \textsl{name} is \texttt{Ex-Intro} or \texttt{Ex-Elim}, and may occur if it is \texttt{Rec}. Examples for object constants are \begin{alignat*}{2} &\texttt{(const Compose $(\alpha {\to} \beta) {\to} (\beta {\to} \gamma) {\to} \alpha {\to} \gamma$ $(\alpha \mapsto \rho, \beta \mapsto \sigma, \gamma \mapsto \tau)$ \dots)} \index{Compose@\texttt{Compose}} \\ &\texttt{(const Eq $\alpha \to \alpha \to \boole$ $(\alpha \mapsto \textsl{finalg})$ \dots)} \index{Eq@\texttt{Eq}} \\ &\texttt{(const E $\alpha \to \boole$ $(\alpha \mapsto \textsl{finalg} \dots)$)} \index{E@\texttt{E}} \\ &\texttt{(const Ex-Elim $\ex x^\alpha P(x) \to (\forall x^\alpha. P(x) \to Q) \to Q$} \\ &\qquad\qquad\qquad\quad \texttt{$(\alpha \mapsto \tau, P^{(\alpha)} \mapsto \set{z^\tau}{A}, Q \mapsto \set{}{B})$ \dots)} \index{Ex-Elim@\texttt{Ex-Elim}} \end{alignat*} \textsl{object-or-arity} is an object if this object cannot be changed, e.g.\ by allowing user defined rules for the constant; otherwise, the associated object needs to be updated whenever a new rule is added, and we have the arity of those rules instead. 
The rules are of crucial importance for the correctness of a proof, and should not be invisibly buried in the denoted object taken as part of the constant (hence of any term involving it). Therefore we keep the rules of a program constant and also its denoted objects (depending on type substitutions) at a central place, a global variable \texttt{PROGRAM-CONSTANTS}, which assigns to every name of such a constant the constant itself (with uninstantiated type), the rules presently chosen for it and also its denoted objects (as an association list with type substitutions as keys). When a new rule has been added, the new objects for the program constant are computed, and the new list to be associated with the program constant is written in \texttt{PROGRAM-CONSTANTS} instead. All information on a program constant except its denoted object and its computation and rewrite rules (i.e.\ its type, degree of totality, arity and token type) is stable and hence can be kept as part of it.

The \emph{token type}\index{token type} can be either \texttt{const} (i.e.\ constant written as application) or one of: \texttt{postfix-op}, \texttt{prefix-op}, \texttt{binding-op}, \texttt{add-op}, \texttt{mul-op}, \texttt{rel-op}, \texttt{and-op}, \texttt{or-op}, \texttt{imp-op} and \texttt{pair-op}.

Constructor, accessors and tests for all kinds of constants:
\begin{alignat*}{2}
&\texttt{(make-const \textsl{obj-or-arity} \textsl{name} \textsl{kind} \textsl{uninst-type} \textsl{tsubst}} \\
&\quad \texttt{\textsl{t-deg} \textsl{token-type} . \textsl{arrow-types-or-repro-formulas})}%
\index{make-const@\texttt{make-const}}
% &\quad& \text{constructor}
\\
&\texttt{(const-to-object-or-arity \textsl{const})}%
\index{const-to-object-or-arity@\texttt{const-to-object-or-arity}}
% && \text{accessor}
\\
&\texttt{(const-to-name \textsl{const})}%
\index{const-to-name@\texttt{const-to-name}}
% && \text{accessor}
\\
&\texttt{(const-to-kind \textsl{const})}%
\index{const-to-kind@\texttt{const-to-kind}}
% && \text{accessor}
\\
&\texttt{(const-to-uninst-type \textsl{const})}%
\index{const-to-uninst-type@\texttt{const-to-uninst-type}}
% && \text{accessor}
\\
&\texttt{(const-to-tsubst \textsl{const})}%
\index{const-to-tsubst@\texttt{const-to-tsubst}}
% && \text{accessor}
\\
&\texttt{(const-to-t-deg \textsl{const})}%
\index{const-to-t-deg@\texttt{const-to-t-deg}}
% && \text{accessor}
\\
&\texttt{(const-to-token-type \textsl{const})}%
\index{const-to-token-type@\texttt{const-to-token-type}}
% && \text{accessor}
\\
&\texttt{(const-to-arrow-types-or-repro-formulas \textsl{const})}%
\index{const-to-arrow-types-or-repro-formulas@\texttt{const-to-arrow-types-or{\dots}}}
% && \text{accessor}
\\
&\texttt{(const?\ \textsl{x})}%
\index{const?@\texttt{const?}}
% && \text{test}
\\
&\texttt{(const=?\ \textsl{x} \textsl{y})}
\index{const=?@\texttt{const=?}}
% && \text{test.}
\end{alignat*}
The type substitution \textsl{tsubst} must be restricted to the type variables in \texttt{uninst-type}. \texttt{arrow-types-or-repro-formulas} are only present for the \texttt{Rec} constants. They are needed for the reproduction case. From these we can define
\begin{alignat*}{2}
&\texttt{(const-to-type \textsl{const})}%
\index{const-to-type@\texttt{const-to-type}} \\
&\texttt{(const-to-tvars \textsl{const})}%
\index{const-to-tvars@\texttt{const-to-tvars}}
\end{alignat*}
A \emph{constructor}\index{constructor} is a special constant with no rules.
We maintain an association list \texttt{CONSTRUCTORS} assigning to every name of a constructor an association list associating with every type substitution (restricted to the type parameters) the corresponding instance of the constructor. We provide \begin{alignat*}{2} &\texttt{(constr-name? \textsl{string})}% \index{constr-name?@\texttt{constr-name?}} \\ &\texttt{(constr-name-to-constr \textsl{name} <\textsl{tsubst}>)}% \index{constr-name-to-constr@\texttt{constr-name-to-constr}} \\ &\texttt{(constr-name-and-tsubst-to-constr \textsl{name} \textsl{tsubst})}% \index{constr-name-and-tsubst-to-constr@\texttt{constr-name-and-tsubst{\dots}}}, \end{alignat*} where in \texttt{(constr-name-to-constr \textsl{name} <\textsl{tsubst}>)}, \textsl{name} is a string or else of the form \texttt{(Ex-Intro \textsl{formula})}. If the optional \textsl{tsubst} is not present, the empty substitution is used. For given algebras one can display the associated constructors with their types by calling \begin{alignat*}{2} &\texttt{(display-constructors \textsl{alg-name1} \dots)}% \index{display-constructors@\texttt{display-constructors}}. \end{alignat*} We also need procedures recovering information from the string denoting a program constant (via \texttt{PROGRAM-CONSTANTS}): \begin{alignat*}{2} &\texttt{(pconst-name-to-pconst \textsl{name})}% \index{pconst-name-to-pconst@\texttt{pconst-name-to-pconst}} \\ &\texttt{(pconst-name-to-comprules \textsl{name})}% \index{pconst-name-to-comprules@\texttt{pconst-name-to-comprules}} \\ &\texttt{(pconst-name-to-rewrules \textsl{name})}% \index{pconst-name-to-rewrules@\texttt{pconst-name-to-rewrules}} \\ &\texttt{(pconst-name-to-inst-objs \textsl{name})}% \index{pconst-name-to-inst-objs@\texttt{pconst-name-to-inst-objs}} \\ &\texttt{(pconst-name-and-tsubst-to-object \textsl{name} \textsl{tsubst})}% % \index{pconst-name-and-tsubst-to-object@\texttt{pconst-name-and-tsubst-to-object}} \\ &\texttt{(pconst-name-to-object \textsl{name})}% \index{pconst-name-to-object@\texttt{pconst-name-to-object}}. \end{alignat*} One can display the program constants together with their current computation and rewrite rules by calling \begin{alignat*}{2} &\texttt{(display-program-constants \textsl{name1} \dots)}% \index{display-program-constants@\texttt{display-program-constants}}. \end{alignat*} To add and remove program constants we use \begin{align*} &\texttt{(add-program-constant \textsl{name} \textsl{type} <\textsl{rest}>)} \index{add-program-constant@\texttt{add-program-constant}} \\ &\texttt{(remove-program-constant \textsl{symbol})}; \index{remove-program-constant@\texttt{remove-program-constant}} \end{align*} \textsl{rest} consists of an initial segment of the following list: \texttt{t-deg} (default $0$), \texttt{token-type} (default \texttt{const}) and \texttt{arity} (default maximal number of argument types). 
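As a hypothetical example, a doubling function could be introduced as a program constant and then given computation rules with the procedures described in the next paragraph; passing the left- and right-hand sides as parsed terms via \texttt{pt} is an assumption here and should be checked against the actual interface (it also presupposes that \texttt{n} is a variable name of type \texttt{nat}).
\begin{verbatim}
(add-program-constant "Double" (py "nat=>nat"))

; constructor patterns on the left, non-overlapping and left-linear
(add-computation-rule (pt "Double 0") (pt "0"))
(add-computation-rule (pt "Double(Succ n)") (pt "Succ(Succ(Double n))"))
\end{verbatim}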
To add and remove computation and rewrite rules we have
\begin{align*}
&\texttt{(add-computation-rule \textsl{lhs} \textsl{rhs})}
\index{add-computation-rule@\texttt{add-computation-rule}} \\
&\texttt{(add-rewrite-rule \textsl{lhs} \textsl{rhs})}
\index{add-rewrite-rule@\texttt{add-rewrite-rule}} \\
&\texttt{(remove-computation-rules-for \textsl{lhs})}
\index{remove-computation-rules-for@\texttt{remove-computation-rules-for}} \\
&\texttt{(remove-rewrite-rules-for \textsl{lhs}).}
\index{remove-rewrite-rules-for@\texttt{remove-rewrite-rules-for}}
\end{align*}
To generate our constants with fixed rules we use
\begin{alignat*}{2}
&\texttt{(finalg-to-=-const \textsl{finalg})}
\index{finalg-to-=-const@\texttt{finalg-to-=-const}} &\quad& \text{equality} \\
&\texttt{(finalg-to-e-const \textsl{finalg})}
\index{finalg-to-e-const@\texttt{finalg-to-e-const}} && \text{existence} \\
&\texttt{(arrow-types-to-rec-const .\ \textsl{arrow-types})}
\index{arrow-types-to-rec-const@\texttt{arrow-types-to-rec-const}} && \text{recursion} \\
&\texttt{(ex-formula-and-concl-to-ex-elim-const } \\
&\texttt{\qquad \textsl{ex-formula} \textsl{concl})}%
\index{ex-formula-and-concl-to-ex-elim-const@\texttt{ex-for{\dots}-to-ex-elim-const}}
\end{alignat*}
Similarly to \texttt{arrow-types-to-rec-const} we also define the procedure \texttt{all-formulas-to-rec-const}. It will be used to achieve normalization of proofs by translating them into terms. [Still to be added: \texttt{arrow-types-to-cases-const} and, for the treatment of proofs, \texttt{all-formulas-to-cases-const}.]

\section{Predicate variables and constants}
\mylabel{S:Psyms}

\subsection{Predicate variables}
\mylabel{SS:PredVars}

A predicate variable of arity\index{arity!of a predicate variable} $\rho_1, \dots, \rho_n$ is a placeholder for a formula $A$ with distinguished (different) variables $x_1, \dots, x_n$ of types $\rho_1, \dots, \rho_n$. Such an entity is called a \indexentry{comprehension term}, written $\set{x_1, \dots, x_n}{A}$.
% We also allow predicate constants
% with a fixed intended meaning (e.g.\ $\bot$\index{bottom}). Predicate
% variables and constants are both called predicate symbols.
Predicate variable names are provided in the form of an association list, which assigns to the names their arities. By default we have the predicate variable \texttt{bot}\index{bottom} of arity \texttt{(arity)}, called (logical) falsity. It is viewed as a predicate variable rather than a predicate constant, since (when translating a classical proof into a constructive one) we want to substitute for \texttt{bot}.

Often we will argue about \emph{Harrop formulas}\index{Harrop formula} only, i.e.\ formulas without computational content. For convenience we use a special sort of predicate variables intended to range over comprehension terms with Harrop formulas only. For example, $\texttt{P0}, \texttt{P1}, \texttt{P2}, \dots, \texttt{Q0}, \dots$ range over comprehension terms with Harrop formulas, and $\verb#P^0#, \verb#P^1#, \verb#P^2#, \dots$ are general predicate variables. We say that the \emph{Harrop degree}\index{Harrop degree} for the former is $1$, and for the latter $0$.
% \subsection*{Interface} We need constructors and accessors for arities \begin{align*} &\texttt{(make-arity \textsl{type1} \dots)} \index{make-arity@\texttt{make-arity}} \\ &\texttt{(arity-to-types \textsl{arity})} \index{arity-to-types@\texttt{arity-to-types}} \end{align*} To display an arity we have \[ \texttt{(arity-to-string \textsl{arity})} \index{arity-to-string@\texttt{arity-to-string}} \] We can test whether a string is a name for a predicate variable, and if so compute its associated arity: \begin{align*} &\texttt{(pvar-name?\ \textsl{string})} \index{pvar-name?@\texttt{pvar-name?}} \\ &\texttt{(pvar-name-to-arity \textsl{pvar-name})} \index{pvar-name-to-arity@\texttt{pvar-name-to-arity}} \end{align*} To add and remove names for predicate variables of a given arity (e.g.\ $\texttt{Q}$ for predicate variables of arity \texttt{nat}), we use \begin{align*} &\texttt{(add-pvar-name \textsl{name1} \dots\ \textsl{arity})}% \index{add-pvar-name@\texttt{add-pvar-name}} \\ &\texttt{(remove-pvar-name \textsl{name1} \dots)}% \index{remove-pvar-name@\texttt{remove-pvar-name}} \end{align*} We need a constructor, accessors and tests for predicate variables. % Note that the arity is not necessary as an argument for % \texttt{make-pvar}, since it can be read off from % \texttt{pvar-name}. \begin{alignat*}{2} &\texttt{(make-pvar \textsl{arity} \textsl{index} \textsl{h-deg} \textsl{name})} \index{make-pvar@\texttt{make-pvar}} &\quad& \text{constructor} \\ &\texttt{(pvar-to-arity \textsl{pvar})} \index{pvar-to-arity@\texttt{pvar-to-arity}} && \text{accessor} \\ &\texttt{(pvar-to-index \textsl{pvar})} \index{pvar-to-index@\texttt{pvar-to-index}} && \text{accessor} \\ &\texttt{(pvar-to-h-deg \textsl{pvar})} \index{pvar-to-h-deg@\texttt{pvar-to-h-deg}} && \text{accessor} \\ &\texttt{(pvar-to-name \textsl{pvar})} \index{pvar-to-name@\texttt{pvar-to-name}} && \text{accessor} \\ &\texttt{(pvar?\ \textsl{x})} \index{pvar?@\texttt{pvar?}} \\ &\texttt{(equal-pvars?\ \textsl{pvar1} \textsl{pvar2})} \index{equal-pvars?@\texttt{equal-pvars?}} \end{alignat*} For convenience we have the function \begin{alignat*}{2} &\texttt{(mk-pvar \textsl{arity} <\textsl{index}> <\textsl{h-deg}> <\textsl{name}>)} \end{alignat*} The arity is a required argument; the remaining arguments are optional. The default for \textsl{index} is $-1$, for \textsl{h-deg} is $1$ (i.e.\ Harrop-formula) and for \textsl{name} it is given by \texttt{(default-pvar-name \textsl{arity})}. It is guaranteed that parsing a displayed predicate variable reproduces the predicate variable; the converse need not be the case (we may want to convert it into some canonical form). \subsection{Predicate constants} \mylabel{SS:PredConsts} We also allow \emph{predicate constants}\index{predicate constant}. The general reason for having them is that we need predicates to be axiomatized, e.g.\ \texttt{Equal} and \texttt{Total} (which are \emph{not} placeholders for formulas). Prime formulas built from predicate constants do not give rise to extracted terms, and cannot be substituted for. Notice that a predicate constant does not change its name under a type substitution; this is in contrast to predicate (and other) variables. Notice also that the parser can infer from the arguments the types $\rho_1 \dots \rho_n$ to be substituted for the type variables in the uninstantiated arity of $P$. % Discarded 01-08-20 % We also allow \indexentry{predicate constants}; they are viewed as % constants with fixed rules. 
% For equality \texttt{Eq} and existence \texttt{Ex} there are such
% rules (e.g.\ $x=x \cnv T$), but for predicate constants intended to
% be axiomatized there are no such rules.  The need for predicate
% constants comes up when e.g.\ an inductively defined set is expressed
% via a formula stating the existence of a generation tree; the kernel
% of this formula is to be axiomatized, using the tree constructors.
% Since predicate constants are constants with fixed rules, they do not
% give rise to extracted terms, and cannot be substituted for.
% A predicate constant does not change its name under a type
% substitution; this is in contrast to predicate (and other) variables.
% To enable the parser to infer its type, generally a predicate constant
% is to be displayed in the form $(P \rho_1 \dots \rho_n)$, where
% $\rho_1 \dots \rho_n$ are the types to be substituted for the type
% variables in the uninstantiated type of $P$.  However, quite often the
% type substitution can be inferred by the parser from the types of the
% arguments.  This is the case e.g.\ for equality and existence, where
% we can parse $x^{\alpha} = x^{\alpha}$ and $E x$ as well as
% $n^{\nat} = n^{\nat}$ and $E n$.  This happens quite regularly for
% all constants whose type involves type variables (i.e.\ of token type
% \texttt{constscheme} rather than \texttt{const}).

% \subsection*{Interface}

To add and remove names for predicate constants of a given arity, we use
\begin{align*}
&\texttt{(add-predconst-name \textsl{name1} \dots\ \textsl{arity})}%
\index{add-predconst-name@\texttt{add-predconst-name}} \\
&\texttt{(remove-predconst-name \textsl{name1} \dots)}%
\index{remove-predconst-name@\texttt{remove-predconst-name}}
\end{align*}
We need a constructor, accessors and tests for predicate constants.
\begin{alignat*}{2}
&\texttt{(make-predconst \textsl{uninst-arity} \textsl{tsubst} \textsl{index} \textsl{name})}
\index{make-predconst@\texttt{make-predconst}}
&\quad& \text{constructor} \\
&\texttt{(predconst-to-uninst-arity \textsl{predconst})}
\index{predconst-to-uninst-arity@\texttt{predconst-to-uninst-arity}}
&& \text{accessor} \\
&\texttt{(predconst-to-tsubst \textsl{predconst})}
\index{predconst-to-tsubst@\texttt{predconst-to-tsubst}}
&& \text{accessor} \\
&\texttt{(predconst-to-index \textsl{predconst})}
\index{predconst-to-index@\texttt{predconst-to-index}}
&& \text{accessor} \\
&\texttt{(predconst-to-name \textsl{predconst})}
\index{predconst-to-name@\texttt{predconst-to-name}}
&& \text{accessor} \\
&\texttt{(predconst?\ \textsl{x})}
\index{predconst?@\texttt{predconst?}}
\end{alignat*}
Moreover we need
\begin{alignat*}{2}
&\texttt{(predconst-name?\ \textsl{name})}%
\index{predconst-name?@\texttt{predconst-name?}} \\
&\texttt{(predconst-name-to-arity \textsl{predconst-name})}%
\index{predconst-name-to-arity@\texttt{predconst-name-to-arity}} \\
&\texttt{(predconst-to-string \textsl{predconst})}.%
\index{predconst-to-string@\texttt{predconst-to-string}}
\end{alignat*}

\subsection{Inductively defined predicate constants}
\mylabel{SS:IDPredConsts}

As we have seen, type variables allow for a general treatment of
inductively generated types $\mu \vec{\alpha} \,\vec{\kappa}$.
Similarly, we can use predicate variables to inductively generate
predicates $\mu \vec{X} \,\vec{K}$.
More precisely, we allow the formation of inductively generated
predicates $\mu \vec{X} \,\vec{K}$, where $\vec{X} = (X_j)_{j=1,\dots,N}$
is a list of distinct predicate variables, and $\vec{K} =
(K_i)_{i=1,\dots,k}$ is a list of constructor formulas (or
\inquotes{clauses}\index{clause}) containing $X_1,\dots,X_N$ in
strictly positive positions only.

To introduce inductively defined predicates we use
\[
\texttt{add-ids}\index{add-ids@\texttt{add-ids}}.
\]
An example is
\begin{verbatim}
(add-ids (list (list "Ev" (make-arity (py "nat")) "algEv")
               (list "Od" (make-arity (py "nat")) "algOd"))
         '("Ev 0" "InitEv")
         '("allnc n.Od n -> Ev(n+1)" "GenEv")
         '("Od 1" "InitOd")
         '("allnc n.Ev n -> Od(n+1)" "GenOd"))
\end{verbatim}
This simultaneously introduces the two inductively defined predicate
constants \texttt{Ev} and \texttt{Od}, by the clauses given.  The
presence of an algebra name after the arity (here \texttt{algEv} and
\texttt{algOd}) indicates that this inductively defined predicate
constant is to have computational content.  Then all clauses with this
constant in the conclusion must provide a constructor name (here
\texttt{InitEv}, \texttt{GenEv}, \texttt{InitOd}, \texttt{GenOd}).  If
the constant is to have no computational content, then all its clauses
must be invariant (under realizability, a.k.a.\ \inquotes{negative}).

Here are some further examples of inductively defined predicates:
\begin{verbatim}
(add-ids (list (list "Even" (make-arity (py "nat")) "algEven"))
         '("Even 0" "InitEven")
         '("allnc n.Even n -> Even(n+2)" "GenEven"))

(add-ids (list (list "Acc" (make-arity (py "nat")) "algAcc"))
         '("allnc n.(all m.m<n -> Acc m) -> Acc n" "GenAccSup"))

(add-ids (list (list "OrID" (make-arity) "algOrID"))
         '("P^1 -> OrID" "InlOrID")
         '("P^2 -> OrID" "InrOrID"))

(add-ids (list (list "EqID" (make-arity (py "alpha") (py "alpha")) "algEqID"))
         '("allnc x^ EqID x^ x^" "GenEqID"))

(add-ids (list (list "ExID" (make-arity) "algExID"))
         '("allnc x^.Q^ x^ -> ExID" "GenExID"))

(add-ids (list (list "FalsityID" (make-arity) "algFalsityID")))
\end{verbatim}

\section{Terms and objects}
\mylabel{Terms}

Terms are built from (typed) variables and constants by abstraction,
application, pairing, formation of left and right components
(i.e.\ projections) and the \texttt{if}-construct.  The
\texttt{if}-construct\index{if-construct@\texttt{if}-construct}
distinguishes cases according to the outer constructor form; the
simplest example (for the type \texttt{boole}) is \emph{if-then-else}.
Here we do not want to evaluate all arguments right away, but rather
evaluate the test argument first and depending on the result evaluate
at most one of the other arguments.  This phenomenon is well known in
functional languages; e.g.\ in \textsc{Scheme} the \texttt{if}-construct
is called a \emph{special form} as opposed to an operator.  In
accordance with this terminology we also call our \texttt{if}-construct
a special form\index{special form}.  It will be given a special
treatment in \texttt{nbe-term-to-object}.

Usually it will be the case that every closed term of an sfa ground
type reduces via the computation rules to a constructor term,
i.e.\ a closed term built from constructors only.  However, we do not
require this.
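For instance, a closed term such as \texttt{1+2} of the finitary
algebra \texttt{nat} does reduce to a constructor term, namely the
numeral \texttt{3}.  A minimal sketch, assuming the \texttt{nat}
library with its arithmetic is loaded; \texttt{pt} is the usual
parsing function for terms, and \texttt{nt} is the normalization
procedure described further below:
\begin{verbatim}
(define t (pt "1+2"))      ; parse a closed term of the algebra nat
(term-to-string (nt t))    ; normalize: should yield the numeral "3"
\end{verbatim}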
% \subsection*{Interface} We have constructors, accessors and tests for variables \begin{alignat*}{2} &\texttt{(make-term-in-var-form var)} \index{make-term-in-var-form@\texttt{make-term-in-var-form}} &\quad& \text{constructor} \\ % &\texttt{(term-in-var-form-to-string \textsl{term})} % \index{term-in-var-form-to-string@\texttt{term-in-var-form-to-string}} % && \text{accessor,} \\ &\texttt{(term-in-var-form-to-var \textsl{term})} \index{term-in-var-form-to-var@\texttt{term-in-var-form-to-var}} && \text{accessor,} \\ &\texttt{(term-in-var-form?\ \textsl{term})} \index{term-in-var-form?@\texttt{term-in-var-form?}} && \text{test,} \end{alignat*} for constants \begin{alignat*}{2} &\texttt{(make-term-in-const-form \textsl{const})} \index{make-term-in-const-form@\texttt{make-term-in-const-form}} &\quad& \text{constructor} \\ &\texttt{(term-in-const-form-to-const \textsl{term})} \index{term-in-const-form-to-const@\texttt{term-in-const-form-to-const}} && \text{accessor} \\ &\texttt{(term-in-const-form?\ \textsl{term})} \index{term-in-const-form?@\texttt{term-in-const-form?}} && \text{test,} \end{alignat*} for abstractions \begin{alignat*}{2} &\texttt{(make-term-in-abst-form \textsl{var} \textsl{term})} \index{make-term-in-abst-form@\texttt{make-term-in-abst-form}} &\quad& \text{constructor} \\ &\texttt{(term-in-abst-form-to-var \textsl{term})} \index{term-in-abst-form-to-var@\texttt{term-in-abst-form-to-var}} && \text{accessor} \\ &\texttt{(term-in-abst-form-to-kernel \textsl{term})} \index{term-in-abst-form-to-kernel@\texttt{term-in-abst-form-to-kernel}} && \text{accessor} \\ &\texttt{(term-in-abst-form?\ \textsl{term})} \index{term-in-abst-form?@\texttt{term-in-abst-form?}} && \text{test,} \end{alignat*} for applications \begin{alignat*}{2} &\texttt{(make-term-in-app-form \textsl{term1} \textsl{term2})} \index{make-term-in-app-form@\texttt{make-term-in-app-form}} &\quad& \text{constructor} \\ &\texttt{(term-in-app-form-to-op \textsl{term})} \index{term-in-app-form-to-op@\texttt{term-in-app-form-to-op}} && \text{accessor} \\ &\texttt{(term-in-app-form-to-arg \textsl{term})} \index{term-in-app-form-to-arg@\texttt{term-in-app-form-to-arg}} && \text{accessor} \\ &\texttt{(term-in-app-form?\ \textsl{term})} \index{term-in-app-form?@\texttt{term-in-app-form?}} && \text{test,} \end{alignat*} for pairs \begin{alignat*}{2} &\texttt{(make-term-in-pair-form \textsl{term1} \textsl{term2})} \index{make-term-in-pair-form@\texttt{make-term-in-pair-form}} &\quad& \text{constructor} \\ &\texttt{(term-in-pair-form-to-left \textsl{term})} \index{term-in-pair-form-to-left@\texttt{term-in-pair-form-to-left}} && \text{accessor} \\ &\texttt{(term-in-pair-form-to-right \textsl{term})} \index{term-in-pair-form-to-right@\texttt{term-in-pair-form-to-right}} && \text{accessor} \\ &\texttt{(term-in-pair-form?\ \textsl{term})} \index{term-in-pair-form?@\texttt{term-in-pair-form?}} && \text{test,} \end{alignat*} for the left and right component of a pair \begin{alignat*}{2} &\texttt{(make-term-in-lcomp-form \textsl{term})} \index{make-term-in-lcomp-form@\texttt{make-term-in-lcomp-form}} &\quad& \text{constructor} \\ &\texttt{(make-term-in-rcomp-form \textsl{term})} \index{make-term-in-rcomp-form@\texttt{make-term-in-rcomp-form}} && \text{constructor} \\ &\texttt{(term-in-lcomp-form-to-kernel \textsl{term})} \index{term-in-lcomp-form-to-kernel@\texttt{term-in-lcomp-form-to-kernel}} && \text{accessor} \\ &\texttt{(term-in-rcomp-form-to-kernel \textsl{term})} 
\index{term-in-rcomp-form-to-kernel@\texttt{term-in-rcomp-form-to-kernel}}
&& \text{accessor} \\
&\texttt{(term-in-lcomp-form?\ \textsl{term})}
\index{term-in-lcomp-form?@\texttt{term-in-lcomp-form?}}
&& \text{test} \\
&\texttt{(term-in-rcomp-form?\ \textsl{term})}
\index{term-in-rcomp-form?@\texttt{term-in-rcomp-form?}}
&& \text{test}
\end{alignat*}
and for \texttt{if}-constructs
\begin{alignat*}{2}
&\texttt{(make-term-in-if-form \textsl{test} \textsl{alts} .\ \textsl{rest})}
\index{make-term-in-if-form@\texttt{make-term-in-if-form}}
&\quad& \text{constructor} \\
&\texttt{(term-in-if-form-to-test \textsl{term})}
\index{term-in-if-form-to-test@\texttt{term-in-if-form-to-test}}
&& \text{accessor} \\
&\texttt{(term-in-if-form-to-alts \textsl{term})}
\index{term-in-if-form-to-alts@\texttt{term-in-if-form-to-alts}}
&& \text{accessor} \\
&\texttt{(term-in-if-form-to-rest \textsl{term})}
\index{term-in-if-form-to-rest@\texttt{term-in-if-form-to-rest}}
&& \text{accessor} \\
&\texttt{(term-in-if-form?\ \textsl{term})}
\index{term-in-if-form?@\texttt{term-in-if-form?}}
&& \text{test,}
\end{alignat*}
where in \texttt{make-term-in-if-form}, \textsl{rest} is either empty
or an all-formula.

It is convenient to have more general application constructors and
accessors available, where application takes arbitrarily many
arguments and works for ordinary application as well as for component
formation.
\begin{alignat*}{2}
&\texttt{(mk-term-in-app-form \textsl{term} \textsl{term1} \dots)}
\index{mk-term-in-app-form@\texttt{mk-term-in-app-form}}
&\quad& \text{constructor} \\
&\texttt{(term-in-app-form-to-final-op \textsl{term})}
\index{term-in-app-form-to-final-op@\texttt{term-in-app-form-to-final-op}}
&& \text{accessor} \\
&\texttt{(term-in-app-form-to-args \textsl{term})}
\index{term-in-app-form-to-args@\texttt{term-in-app-form-to-args}}
&& \text{accessor.}
\end{alignat*}
Also for abstraction it is convenient to have a more general
constructor taking arbitrarily many variables to be abstracted one
after the other
\begin{alignat*}{2}
&\texttt{(mk-term-in-abst-form \textsl{var1} \dots\ \textsl{term})}.
\index{mk-term-in-abst-form@\texttt{mk-term-in-abst-form}}
\end{alignat*}
We also allow vector notation for recursion (cf.\ Joachimski and
Matthes \cite{JoachimskiMatthes03}).

Moreover we need
\begin{alignat*}{2}
&\texttt{(term?\ \textsl{x})}
\index{term?@\texttt{term?}} \\
&\texttt{(term=?\ \textsl{term1} \textsl{term2})}
\index{term=?@\texttt{term=?}} \\
&\texttt{(terms=?\ \textsl{terms1} \textsl{terms2})}
\index{terms=?@\texttt{terms=?}} \\
&\texttt{(term-to-type \textsl{term})}
\index{term-to-type@\texttt{term-to-type}} \\
&\texttt{(term-to-free \textsl{term})}
\index{term-to-free@\texttt{term-to-free}} \\
&\texttt{(term-to-bound \textsl{term})}
\index{term-to-bound@\texttt{term-to-bound}} \\
&\texttt{(term-to-t-deg \textsl{term})}
\index{term-to-t-deg@\texttt{term-to-t-deg}} \\
&\texttt{(synt-total?\ \textsl{term})}
\index{synt-total?@\texttt{synt-total?}} \\
&\texttt{(term-to-string \textsl{term})}.
\index{term-to-string@\texttt{term-to-string}}
\end{alignat*}
% To take care of arithmetical terms, we use
% \begin{alignat*}{2}
% &\texttt{(mk-+ )} \\
% &\texttt{(mk-- )} \\
% &\texttt{(mk-max )} \\
% &\texttt{(mk-min )} \\
% &\texttt{(mk-* )}
% \end{alignat*}

\subsection{Normalization}

We need an operation which transforms a term into its normal form
w.r.t.\ the given computation and rewrite rules.
Here we base our treatment on \emph{normalization by evaluation}
introduced in \cite{BergerSchwichtenberg91a}, and extended to arbitrary
computation and rewrite rules in \cite{BergerEberlSchwichtenberg03}.
For normalization by evaluation we need semantical \emph{objects}.  For
an arbitrary ground type every term family of that type is an object.
For an sfa ground type, in addition the constructors have semantical
counterparts.  The freeness of the constructors is expressed by
requiring that their ranges are disjoint and that they are injective.
Moreover, we view the free algebra as a domain and require that its
bottom element is not in the range of the constructors.  Hence the
constructors are total and non-strict.  Then by applying
\texttt{nbe-reflect} followed by \texttt{nbe-reify} we can normalize
every term, where normalization refers to the computation as well as
the rewrite rules.

% \subsection*{Interface}

An object consists of a semantical value and a type.
\begin{alignat*}{2}
&\texttt{(nbe-make-object \textsl{type} \textsl{value})}
\index{nbe-make-object@\texttt{nbe-make-object}}
&\quad& \text{constructor} \\
&\texttt{(nbe-object-to-type \textsl{object})}
\index{nbe-object-to-type@\texttt{nbe-object-to-type}}
&& \text{accessor} \\
&\texttt{(nbe-object-to-value \textsl{object})}
\index{nbe-object-to-value@\texttt{nbe-object-to-value}}
&& \text{accessor} \\
&\texttt{(nbe-object?\ \textsl{x})}
\index{nbe-object?@\texttt{nbe-object?}}
&& \text{test.}
\end{alignat*}
To work with objects, we need
\begin{alignat*}{2}
&\texttt{(nbe-object-apply \textsl{function-obj} \textsl{arg-obj})}
\index{nbe-object-apply@\texttt{nbe-object-apply}}
\end{alignat*}
Again it is convenient to have a more general application operation
available, which takes arbitrarily many arguments and works for
ordinary application as well as for component formation.  We also need
an operation composing two unary function objects.
\begin{alignat*}{2}
&\texttt{(nbe-object-app \textsl{function-obj} \textsl{arg-obj1} \dots)}
\index{nbe-object-app@\texttt{nbe-object-app}} \\
&\texttt{(nbe-object-compose \textsl{function-obj1} \textsl{function-obj2})}
\index{nbe-object-compose@\texttt{nbe-object-compose}}
\end{alignat*}
For ground type values we need constructors, accessors and tests.  To
make constructors \inquotes{self-evaluating}, a constructor value has
the form
\[
\hbox{\texttt{(constr-value \textsl{name} \textsl{objs} \textsl{delayed-constr})},}
\]
where \textsl{delayed-constr} is a procedure of zero arguments which
evaluates to this very same constructor.  This is necessary to avoid
having a cycle (for nullary constructors, and only for those).
\begin{alignat*}{2}
&\texttt{(nbe-make-constr-value \textsl{name} \textsl{objs})}
\index{nbe-make-constr-value@\texttt{nbe-make-constr-value}}
&\quad& \text{constructor} \\
&\texttt{(nbe-constr-value-to-name \textsl{value})}
\index{nbe-constr-value-to-name@\texttt{nbe-constr-value-to-name}}
&& \text{accessor} \\
&\texttt{(nbe-constr-value-to-args \textsl{value})}
&& \text{accessor} \\
&\texttt{(nbe-constr-value-to-constr \textsl{value})}
\index{nbe-constr-value-to-constr@\texttt{nbe-constr-value-to-constr}}
&& \text{accessor} \\
&\texttt{(nbe-constr-value?\ \textsl{value})}
\index{nbe-constr-value?@\texttt{nbe-constr-value?}}
&& \text{test} \\
&\texttt{(nbe-fam-value?\ \textsl{value})}
\index{nbe-fam-value?@\texttt{nbe-fam-value?}}
&& \text{test.}
\end{alignat*}
The essential function which \inquotes{animates}\index{animation} the
program constants according to the given computation and rewrite rules
is
\begin{align*}
&\texttt{(nbe-pconst-and-tsubst-and-rules-to-object}
\index{nbe-pconst-and-tsubst-and-rules-to-object@\texttt{nbe-pconst-{\dots}-to-object}} \\
&\qquad \texttt{\textsl{pconst}\ \textsl{tsubst}\ \textsl{comprules}\ \textsl{rewrules})}
\end{align*}
Using it we can then define an \indexentry{evaluation} function, which
assigns to a term and an environment a semantical object:
\begin{alignat*}{2}
&\texttt{(nbe-term-to-object \textsl{term} \textsl{bindings})}
\index{nbe-term-to-object@\texttt{nbe-term-to-object}}
&\quad& \text{evaluation.}
\end{alignat*}
Here \textsl{bindings} is an association list assigning objects of the
same type to variables.  In case a variable is not assigned anything in
\textsl{bindings}, by default we assign the constant term family of
this variable, which always is an object of the correct type.

The interpretation of the program constants requires some auxiliary
functions (cf.\ \cite{BergerEberlSchwichtenberg03}):
\begin{alignat*}{2}
&\texttt{(nbe-constructor-pattern?\ \textsl{term})}
\index{nbe-constructor-pattern?@\texttt{nbe-constructor-pattern?}}
&\quad& \text{test} \\
&\texttt{(nbe-inst?\ \textsl{constr-pattern} \textsl{obj})}
\index{nbe-inst?@\texttt{nbe-inst?}}
&& \text{test} \\
&\texttt{(nbe-genargs \textsl{constr-pattern} \textsl{obj})}
\index{nbe-genargs@\texttt{nbe-genargs}}
&& \text{generalized arguments} \\
% &\texttt{(nbe-select \textsl{pconst} \textsl{term})}
% && \text{selects a rewrite rule} \\
&\texttt{(nbe-extract \textsl{termfam})}
\index{nbe-extract@\texttt{nbe-extract}}
&& \text{extracts a term from a family} \\
&\texttt{(nbe-match \textsl{pattern} \textsl{term})}
\index{nbe-match@\texttt{nbe-match}}
\end{alignat*}
Then we can define
\begin{alignat*}{2}
&\texttt{(nbe-reify \textsl{object})}
\index{nbe-reify@\texttt{nbe-reify}}
&\quad& \text{reification} \\
&\texttt{(nbe-reflect \textsl{term})}
\index{nbe-reflect@\texttt{nbe-reflect}}
&& \text{reflection}
\end{alignat*}
and by means of these
\begin{alignat*}{2}
&\texttt{(nbe-normalize-term \textsl{term})}
\index{nbe-normalize-term@\texttt{nbe-normalize-term}}
&\quad& \text{normalization,}
\end{alignat*}
abbreviated \texttt{nt}\index{nt@\texttt{nt}}.

The \texttt{if}-form needs a special treatment.  In particular, for a
full normalization of terms (including permutative conversions), we
define a preprocessing step that $\eta$-expands the alternatives of all
\texttt{if}-terms.  The result contains \texttt{if}-terms with ground
type alternatives only.
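To illustrate how these pieces fit together, here is a minimal sketch
(not itself part of the interface above).  We assume the \texttt{nat}
library is loaded and that a program constant can be introduced with
\texttt{add-program-constant}; it is then animated by computation
rules and normalized with \texttt{nt}:
\begin{verbatim}
;; a small sketch: animate a program constant by computation rules
(add-program-constant "Double" (py "nat=>nat"))
(add-computation-rule (pt "Double 0") (pt "0"))
(add-computation-rule (pt "Double(Succ n)") (pt "Succ(Succ(Double n))"))

(term-to-string (nt (pt "Double 2")))   ; should yield "4"
\end{verbatim}
Rewrite rules added with \texttt{add-rewrite-rule} are taken into
account by \texttt{nt} in the same way.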
\subsection{Substitution}

Recall the generalities on substitutions in Section~\ref{SS:GenSubst}.
We define simultaneous substitution for type and object variables in a
term, via \texttt{tsubst} and \texttt{subst}.  It is assumed that
\texttt{subst} only affects those variables whose type is not changed
by \texttt{tsubst}.  In the abstraction case of the recursive
definition, the abstracted variable may need to be renamed.  However,
its type can be affected by \texttt{tsubst}.  Then the renaming cannot
be made part of \texttt{subst}, because the condition above would be
violated.  Therefore we carry along a procedure renaming variables,
which remembers the renaming of variables done so far.
\begin{alignat*}{2}
&\texttt{(term-substitute \textsl{term} \textsl{tosubst})}%
\index{term-substitute@\texttt{term-substitute}} \\
&\texttt{(term-subst \textsl{term} \textsl{arg} \textsl{val})}%
\index{term-subst@\texttt{term-subst}} \\
&\texttt{(compose-o-substitutions \textsl{subst1} \textsl{subst2})}%
\index{compose-o-substitutions@\texttt{compose-o-substitutions}}
\end{alignat*}
The \texttt{o} in \texttt{compose-o-substitutions} indicates that we
substitute for \emph{object} variables.  However, since this is the
most common form of substitution, we also write
\texttt{compose-substitutions}%
\index{compose-substitutions@\texttt{compose-substitutions}} instead.

Display functions for substitutions are
\begin{align*}
&\texttt{(display-substitution \textsl{subst})}%
\index{display-substitution@\texttt{display-substitution}} \\
&\texttt{(substitution-to-string \textsl{subst})}%
\index{substitution-to-string@\texttt{substitution-to-string}}
\end{align*}

\section{Formulas and comprehension terms}
\mylabel{S:Formulas}

A prime formula\index{formula!prime} can have the form
\begin{itemize}
\item \texttt{(atom r)} with a term \texttt{r} of type \texttt{boole},
\item \texttt{(predicate a r1 ... rn)} with a predicate variable or
  constant \texttt{a} and terms \texttt{r1} \dots \texttt{rn}.
\end{itemize}
Formulas are built from prime formulas by
\begin{itemize}
\item implication \texttt{(imp \textsl{formula1} \textsl{formula2})}
\item conjunction \texttt{(and \textsl{formula1} \textsl{formula2})}
\item tensor \texttt{(tensor \textsl{formula1} \textsl{formula2})}
\item all quantification \texttt{(all \textsl{x} \textsl{formula})}
\item existential quantification \texttt{(ex \textsl{x} \textsl{formula})}
\item all quantification \texttt{(allnc \textsl{x} \textsl{formula})}
  without computational content
\item existential quantification \texttt{(exnc \textsl{x} \textsl{formula})}
  without computational content
\end{itemize}
Moreover we have classical existential quantification in an
arithmetical and a logical form:
\begin{alignat*}{2}
&\texttt{(exca (\textsl{x1}\dots) \textsl{formula})}
\index{exca@\texttt{exca}}
&\quad& \text{arithmetical version} \\
&\texttt{(excl (\textsl{x1} \dots) \textsl{formula})}
\index{excl@\texttt{excl}}
&& \text{logical version.}
\end{alignat*}
Here we allow that the quantified variable is formed without \verb#^#,
i.e.\ it ranges over total objects only.

Formulas can be \emph{unfolded}\index{formula!unfolded} in the sense
that all the classical existential quantifiers are replaced according
to their definition.  Conversely a formula can be
\emph{folded}\index{formula!folded} in the sense that classical
existential quantifiers are introduced wherever possible.

\emph{Comprehension terms}\index{comprehension term} have the form
\texttt{(cterm \textsl{vars} \textsl{formula})}.  Note that
\textsl{formula} may contain further free variables.
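As a small illustration of unfolding (a sketch, assuming the
\texttt{nat} library is loaded; \texttt{pf} parses a formula, and
\texttt{prime-form?}, \texttt{unfold-formula} and
\texttt{formula-to-string} are described below):
\begin{verbatim}
(prime-form? (pf "n=7"))          ; #t, an atomic formula
(define fla (pf "excl n.n=7"))    ; classical existential quantification
(formula-to-string (unfold-formula fla))
;; the excl quantifier is replaced according to its definition,
;; i.e. expressed by all, -> and falsity
\end{verbatim}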
% \subsection*{Interface} Tests: \begin{align*} &\texttt{(atom-form?\ \textsl{formula})} \index{atom-form?@\texttt{atom-form?}} \\ % &\texttt{(pvar-form?\ \textsl{formula})} % \index{pvar-form?@\texttt{pvar-form?}} % \\ % &\texttt{(predconst-form?\ \textsl{formula})} % \index{predconst-form?@\texttt{predconst-form?}} % \\ &\texttt{(predicate-form?\ \textsl{formula})} \index{predicate-form?@\texttt{predicate-form?}} \\ &\texttt{(prime-form?\ \textsl{formula})} \index{prime-form?@\texttt{prime-form?}} \\ &\texttt{(imp-form?\ \textsl{formula})} \index{imp-form?@\texttt{imp-form?}} \\ &\texttt{(and-form?\ \textsl{formula})} \index{and-form?@\texttt{and-form?}} \\ &\texttt{(tensor-form?\ \textsl{formula})} \index{tensor-form?@\texttt{tensor-form?}} \\ &\texttt{(all-form?\ \textsl{formula})} \index{all-form?@\texttt{all-form?}} \\ &\texttt{(ex-form?\ \textsl{formula})} \index{ex-form?@\texttt{ex-form?}} \\ &\texttt{(allnc-form?\ \textsl{formula})} \index{allnc-form?@\texttt{allnc-form?}} \\ &\texttt{(exnc-form?\ \textsl{formula})} \index{exnc-form?@\texttt{exnc-form?}} \\ &\texttt{(exca-form?\ \textsl{formula})} \index{exca-form?@\texttt{exca-form?}} \\ &\texttt{(excl-form?\ \textsl{formula})} \index{excl-form?@\texttt{excl-form?}} \end{align*} and also \begin{align*} &\texttt{(quant-prime-form?\ \textsl{formula})} \index{quant-prime-form?@\texttt{quant-prime-form?}} \\ &\texttt{(quant-free?\ \textsl{formula}).} \index{quant-free?@\texttt{quant-free?}} \end{align*} We need constructors and accessors for prime formulas \begin{alignat*}{2} &\texttt{(make-atomic-formula \textsl{boolean-term})} \index{make-atomic-formula@\texttt{make-atomic-formula}} \\ &\texttt{(make-predicate-formula \textsl{predicate} \textsl{term1} \dots)} \index{make-predicate-formula@\texttt{make-predicate-formula}} \\ &\texttt{atom-form-to-kernel} \index{atom-form-to-kernel@\texttt{atom-form-to-kernel}} \\ &\texttt{predicate-form-to-predicate} \index{predicate-form-to-predicate@\texttt{predicate-form-to-predicate}} \\ &\texttt{predicate-form-to-args.} \index{predicate-form-to-args@\texttt{predicate-form-to-args}} \end{alignat*} We also have constructors for special atomic formulas \begin{alignat*}{2} &\texttt{(make-eq \textsl{term1} \textsl{term2})} \index{make-eq@\texttt{make-eq}} &\quad& \text{constructor for equalities} \\ &\texttt{(make-= \textsl{term1} \textsl{term2})} \index{make-=@\texttt{make-=}} &\quad& \text{constructor for equalities on finalgs} \\ &\texttt{(make-total \textsl{term})} \index{make-total@\texttt{make-total}} &\quad& \text{constructor for totalities} \\ &\texttt{(make-e \textsl{term})} \index{make-e@\texttt{make-e}} &\quad& \text{constructor for existence on finalgs} \\ &\texttt{truth} \index{truth@\texttt{truth}} \\ &\texttt{falsity} \index{falsity@\texttt{falsity}} \\ &\texttt{falsity-log.} \index{falsity-log@\texttt{falsity-log}} \end{alignat*} We need constructors and accessors for implications \begin{alignat*}{2} &\texttt{(make-imp \textsl{premise} \textsl{conclusion})} \index{make-imp@\texttt{make-imp}} &\quad& \text{constructor} \\ &\texttt{(imp-form-to-premise \textsl{imp-formula})} \index{imp-form-to-premise@\texttt{imp-form-to-premise}} && \text{accessor} \\ &\texttt{(imp-form-to-conclusion \textsl{imp-formula})} \index{imp-form-to-conclusion@\texttt{imp-form-to-conclusion}} && \text{accessor,} \end{alignat*} conjunctions \begin{alignat*}{2} &\texttt{(make-and \textsl{formula1} \textsl{formula2})} \index{make-and@\texttt{make-and}} &\quad& \text{constructor} \\ &\texttt{(and-form-to-left 
\textsl{and-formula})} \index{and-form-to-left@\texttt{and-form-to-left}} && \text{accessor} \\ &\texttt{(and-form-to-right \textsl{and-formula})} \index{and-form-to-right@\texttt{and-form-to-right}} && \text{accessor,} \end{alignat*} tensors \begin{alignat*}{2} &\texttt{(make-tensor \textsl{formula1} \textsl{formula2})} \index{make-tensor@\texttt{make-tensor}} &\quad& \text{constructor} \\ &\texttt{(tensor-form-to-left \textsl{tensor-formula})} \index{tensor-form-to-left@\texttt{tensor-form-to-left}} && \text{accessor} \\ &\texttt{(tensor-form-to-right \textsl{tensor-formula})} \index{tensor-form-to-right@\texttt{tensor-form-to-right}} && \text{accessor,} \end{alignat*} universally quantified formulas \begin{alignat*}{2} &\texttt{(make-all \textsl{var} \textsl{formula})} \index{make-all@\texttt{make-all}} &\quad& \text{constructor} \\ &\texttt{(all-form-to-var \textsl{all-formula})} \index{all-form-to-var@\texttt{all-form-to-var}} && \text{accessor} \\ &\texttt{(all-form-to-kernel \textsl{all-formula})} \index{all-form-to-kernel@\texttt{all-form-to-kernel}} && \text{accessor,} \end{alignat*} existentially quantified formulas \begin{alignat*}{2} &\texttt{(make-ex \textsl{var} \textsl{formula})} \index{make-ex@\texttt{make-ex}} &\quad& \text{constructor} \\ &\texttt{(ex-form-to-var \textsl{ex-formula})} \index{ex-form-to-var@\texttt{ex-form-to-var}} && \text{accessor} \\ &\texttt{(ex-form-to-kernel \textsl{ex-formula})} \index{ex-form-to-kernel@\texttt{ex-form-to-kernel}} && \text{accessor,} \end{alignat*} universally quantified formulas without computational content \begin{alignat*}{2} &\texttt{(make-allnc \textsl{var} \textsl{formula})} \index{make-allnc@\texttt{make-allnc}} &\quad& \text{constructor} \\ &\texttt{(allnc-form-to-var \textsl{allnc-formula})} \index{allnc-form-to-var@\texttt{allnc-form-to-var}} && \text{accessor} \\ &\texttt{(allnc-form-to-kernel \textsl{allnc-formula})} \index{allnc-form-to-kernel@\texttt{allnc-form-to-kernel}} && \text{accessor,} \end{alignat*} existentially quantified formulas without computational content \begin{alignat*}{2} &\texttt{(make-exnc \textsl{var} \textsl{formula})} \index{make-exnc@\texttt{make-exnc}} &\quad& \text{constructor} \\ &\texttt{(exnc-form-to-var \textsl{exnc-formula})} \index{exnc-form-to-var@\texttt{exnc-form-to-var}} && \text{accessor} \\ &\texttt{(exnc-form-to-kernel \textsl{exnc-formula})} \index{exnc-form-to-kernel@\texttt{exnc-form-to-kernel}} && \text{accessor,} \end{alignat*} existentially quantified formulas in the sense of classical arithmetic \begin{alignat*}{2} &\texttt{(make-exca \textsl{var} \textsl{formula})} \index{make-exca@\texttt{make-exca}} &\quad& \text{constructor} \\ &\texttt{(exca-form-to-var \textsl{exca-formula})} \index{exca-form-to-var@\texttt{exca-form-to-var}} && \text{accessor} \\ &\texttt{(exca-form-to-kernel \textsl{exca-formula})} \index{exca-form-to-kernel@\texttt{exca-form-to-kernel}} && \text{accessor,} \end{alignat*} existentially quantified formulas in the sense of classical logic \begin{alignat*}{2} &\texttt{(make-excl \textsl{var} \textsl{formula})} \index{make-excl@\texttt{make-excl}} &\quad& \text{constructor} \\ &\texttt{(excl-form-to-var \textsl{excl-formula})} \index{excl-form-to-var@\texttt{excl-form-to-var}} && \text{accessor} \\ &\texttt{(excl-form-to-kernel \textsl{excl-formula})} \index{excl-form-to-kernel@\texttt{excl-form-to-kernel}} && \text{accessor.} \end{alignat*} For convenience we also have as generalized constructors \begin{alignat*}{2} &\texttt{(mk-imp \textsl{formula} 
\textsl{formula1} {\dots})} \index{mk-imp@\texttt{mk-imp}} &\quad&\text{implication} \\ &\texttt{(mk-neg \textsl{formula1} {\dots})} \index{mk-neg@\texttt{mk-neg}} && \text{negation} \\ &\texttt{(mk-neg-log \textsl{formula1} {\dots})} \index{mk-neg-log@\texttt{mk-neg-log}} && \text{logical negation} \\ &\texttt{(mk-and \textsl{formula} \textsl{formula1} {\dots})} \index{mk-and@\texttt{mk-and}} && \text{conjunction} \\ &\texttt{(mk-tensor \textsl{formula} \textsl{formula1}\! {\dots}\!)} \index{mk-tensor@\texttt{mk-tensor}} && \text{tensor} \\ &\texttt{(mk-all \textsl{var1} {\dots}\ \textsl{formula})} \index{mk-all@\texttt{mk-all}} && \text{all-formula} \\ &\texttt{(mk-ex \textsl{var1} {\dots}\ \textsl{formula})} \index{mk-ex@\texttt{mk-ex}} && \text{ex-formula} \\ &\texttt{(mk-allnc \textsl{var1} {\dots}\ \textsl{formula})} \index{mk-allnc@\texttt{mk-allnc}} && \text{allnc-formula} \\ &\texttt{(mk-exnc \textsl{var1} {\dots}\ \textsl{formula})} \index{mk-exnc@\texttt{mk-exnc}} && \text{exnc-formula} \\ &\texttt{(mk-exca \textsl{var1} {\dots}\ \textsl{formula})} \index{mk-exca@\texttt{mk-exca}} && \text{classical ex-formula (arithmetical)} \\ &\texttt{(mk-excl \textsl{var1} {\dots}\ \textsl{formula})} \index{mk-excl@\texttt{mk-excl}} && \text{classical ex-formula (logical)} \end{alignat*} and as generalized accessors \begin{alignat*}{2} &\texttt{(imp-form-to-premises-and-final-conclusion \textsl{formula})} \\ %\index{imp-form-to-premises-and-final-conclusion@\texttt{imp-form-to-premises-and-final-conclusion}}\\ &\texttt{(tensor-form-to-parts \textsl{formula})} \index{tensor-form-to-parts@\texttt{tensor-form-to-parts}} \\ &\texttt{(all-form-to-vars-and-final-kernel \textsl{formula})} \index{all-form-to-vars-and-final-kernel@\texttt{all-form-to-vars-and{\dots}}} \\ &\texttt{(ex-form-to-vars-and-final-kernel \textsl{formula})} \index{ex-form-to-vars-and-final-kernel@\texttt{ex-form-to-vars-and{\dots}}} \end{alignat*} and similarly for \texttt{exca}-forms and \texttt{excl}-forms. Occasionally it is convenient to have \begin{alignat*}{2} &\texttt{(imp-form-to-premises \textsl{formula} <\textsl{n}>)} \index{imp-form-to-premises@\texttt{imp-form-to-premises}} &\quad& \text{all (first $n$) premises} \\ &\texttt{(imp-form-to-final-conclusion \textsl{formula} <\textsl{n}>)} \index{imp-form-to-final-conclusion@\texttt{imp-form-to-final-conclusion}} \end{alignat*} where the latter computes the final conclusion (conclusion after removing the first $n$ premises) of the formula. It is also useful to have some general procedures working for arbitrary quantified formulas. 
We provide
\begin{alignat*}{2}
&\texttt{(make-quant-formula \textsl{quant} \textsl{var1} \dots\ \textsl{kernel})}
\index{make-quant-formula@\texttt{make-quant-formula}}
&\quad& \text{constructor} \\
&\texttt{(quant-form-to-quant \textsl{quant-form})}
\index{quant-form-to-quant@\texttt{quant-form-to-quant}}
&& \text{accessor} \\
&\texttt{(quant-form-to-vars \textsl{quant-form})}
\index{quant-form-to-vars@\texttt{quant-form-to-vars}}
&& \text{accessor} \\
&\texttt{(quant-form-to-kernel \textsl{quant-form})}
\index{quant-form-to-kernel@\texttt{quant-form-to-kernel}}
&& \text{accessor} \\
&\texttt{(quant-form?\ \textsl{x})}
\index{quant-form?@\texttt{quant-form?}}
&& \text{test.}
\end{alignat*}
and for convenience also
\[
\texttt{(mk-quant \textsl{quant} \textsl{var1} \dots\ \textsl{formula})\index{mk-quant@\texttt{mk-quant}}.}
\]
To fold and unfold formulas we have
\begin{align*}
&\texttt{(fold-formula \textsl{formula})}
\index{fold-formula@\texttt{fold-formula}} \\
&\texttt{(unfold-formula \textsl{formula}).}
\index{unfold-formula@\texttt{unfold-formula}}
\end{align*}
To test equality of formulas up to normalization and $\alpha$-equality
we use
\begin{align*}
&\texttt{(classical-formula=?\ \textsl{formula1} \textsl{formula2})}
\index{classical-formula=?@\texttt{classical-formula=?}} \\
&\texttt{(formula=?\ \textsl{formula1} \textsl{formula2}),}
\index{formula=?@\texttt{formula=?}}
% &\texttt{(formulas=?\ \textsl{formulas1} \textsl{formulas2}),}
\end{align*}
where the former unfolds the classical existential quantifiers and
normalizes all subterms before comparing.

Moreover we need
\begin{align*}
&\texttt{(formula-to-free\ \textsl{formula}),}
\index{formula-to-free@\texttt{formula-to-free}} \\
&\texttt{(nbe-formula-to-type\ \textsl{formula}),}
\index{nbe-formula-to-type@\texttt{nbe-formula-to-type}} \\
&\texttt{(formula-to-prime-subformulas\ \textsl{formula}).}
\index{formula-to-prime-subformulas@\texttt{formula-to-prime-subformulas}}
\end{align*}
Constructors, accessors and a test for comprehension terms are
\begin{alignat*}{2}
&\texttt{(make-cterm \textsl{var1} \dots\ \textsl{formula})}
\index{make-cterm@\texttt{make-cterm}}
&\quad& \text{constructor} \\
&\texttt{(cterm-to-vars \textsl{cterm})}
\index{cterm-to-vars@\texttt{cterm-to-vars}}
&& \text{accessor} \\
&\texttt{(cterm-to-formula \textsl{cterm})}
\index{cterm-to-formula@\texttt{cterm-to-formula}}
&& \text{accessor} \\
&\texttt{(cterm?\ \textsl{x})}
\index{cterm?@\texttt{cterm?}}
&& \text{test.}
\end{alignat*}
Moreover we need
\begin{align*}
&\texttt{(cterm-to-free \textsl{cterm})}
\index{cterm-to-free@\texttt{cterm-to-free}} \\
&\texttt{(cterm=?\ \textsl{x})}
\index{cterm=?@\texttt{cterm=?}} \\
&\texttt{(classical-cterm=?\ \textsl{x})}
\index{classical-cterm=?@\texttt{classical-cterm=?}} \\
&\texttt{(fold-cterm \textsl{cterm})}
\index{fold-cterm@\texttt{fold-cterm}} \\
&\texttt{(unfold-cterm \textsl{cterm}).}
\index{unfold-cterm@\texttt{unfold-cterm}}
\end{align*}
Normalization of formulas is done with
\begin{alignat*}{2}
&\texttt{(normalize-formula \textsl{formula})}
\index{normalize-formula@\texttt{normalize-formula}}
&\quad& \text{normalization,}
\end{alignat*}
abbreviated \texttt{nf}\index{nf@\texttt{nf}}.

Display functions for formulas and comprehension terms are
\begin{alignat*}{2}
&\texttt{(formula-to-string \textsl{formula})}
\index{formula-to-string@\texttt{formula-to-string}} \\
&\texttt{(cterm-to-string \textsl{cterm})}.
\index{cterm-to-string@\texttt{cterm-to-string}}
\end{alignat*}

We can define simultaneous substitution for type, object and predicate
variables in a formula, via \texttt{tsubst}, \texttt{subst} and
\texttt{psubst}.  It is assumed that \texttt{subst} only affects those
variables whose type is not changed by \texttt{tsubst}, and that
\texttt{psubst} only affects those predicate variables whose arity is
not changed by \texttt{tsubst}.  In the quantifier case of the
recursive definition, the abstracted variable may need to be renamed.
However, its type can be affected by \texttt{tsubst}.  Then the
renaming cannot be made part of \texttt{subst}, because then the
condition above would be violated.  Therefore we carry along a
procedure \texttt{rename}\index{rename} renaming variables, which
remembers the renaming of variables done so far.

We will also need formula substitution to compute the formula of an
assumption constant.  However, there (type and) predicate variables
are (implicitly) considered to be bound.  Therefore, we also have to
carry along a procedure \texttt{prename}\index{prename@\texttt{prename}}
renaming predicate variables, which remembers the renaming of
predicate variables done so far.
\begin{alignat*}{2}
&\texttt{(formula-substitute \textsl{formula} \textsl{topsubst})}%
\index{formula-substitute@\texttt{formula-substitute}} \\
&\texttt{(formula-subst \textsl{formula} \textsl{arg} \textsl{val})}%
\index{formula-subst@\texttt{formula-subst}} \\
&\texttt{(cterm-substitute \textsl{cterm} \textsl{topsubst})}%
\index{cterm-substitute@\texttt{cterm-substitute}} \\
&\texttt{(cterm-subst \textsl{cterm} \textsl{arg} \textsl{val})}%
\index{cterm-subst@\texttt{cterm-subst}}
\end{alignat*}
Display functions for predicate substitutions are
\begin{align*}
&\texttt{(display-p-substitution \textsl{psubst})}%
\index{display-p-substitution@\texttt{display-p-substitution}} \\
&\texttt{(p-substitution-to-string \textsl{psubst})}%
\index{p-substitution-to-string@\texttt{p-substitution-to-string}}
\end{align*}

\section{Assumption variables and constants}
\mylabel{S:AssumptionVarConst}

\subsection{Assumption variables}

Assumption variables are for proofs what variables are for terms.  The
main difference, however, is that assumption variables have formulas
as types, and that formulas may contain free variables.  Therefore we
must be careful when substituting terms for variables in assumption
variables.  Our solution (as in Matthes' thesis \cite{Matthes98}) is
to consider an assumption variable as a pair of a (typefree)
identifier and a formula, and to take equality of assumption variables
to mean that both components are equal.  Rather than using symbols as
identifiers we prefer to use numbers (i.e.\ indices).  However,
sometimes it is useful to provide an optional string as name for
display purposes.

% \subsection*{Interface}

We need a constructor, accessors and tests for assumption variables.
\begin{alignat*}{2}
&\texttt{(make-avar \textsl{formula} \textsl{index} \textsl{name})}
\index{make-avar@\texttt{make-avar}}
&\quad& \text{constructor} \\
&\texttt{(avar-to-formula \textsl{avar})}
\index{avar-to-formula@\texttt{avar-to-formula}}
&& \text{accessor} \\
&\texttt{(avar-to-index \textsl{avar})}
\index{avar-to-index@\texttt{avar-to-index}}
&& \text{accessor} \\
&\texttt{(avar-to-name \textsl{avar})}
\index{avar-to-name@\texttt{avar-to-name}}
&& \text{accessor} \\
&\texttt{(avar?\ \textsl{x})}
\index{avar?@\texttt{avar?}}
&& \text{test} \\
&\texttt{(avar=?\ \textsl{avar1} \textsl{avar2})}
\index{avar=?@\texttt{avar=?}}
&& \text{test.}
\end{alignat*}
For convenience we have the function
\begin{alignat*}{2}
&\texttt{(mk-avar \textsl{formula} <\textsl{index}> <\textsl{name}>)}
\end{alignat*}
The formula is a required argument; however, the remaining arguments
are optional.  The default for the name string is \texttt{u}.  We also
require that a function
\begin{align*}
&\texttt{(formula-to-new-avar \textsl{formula})}
\end{align*}
is defined that returns an assumption variable of the requested
formula different from all assumption variables that have ever been
returned by any of the specified functions so far.

% \textbf{Implementation.}
% Assumption variables are implemented as lists:
% $$\texttt{(avar \textsl{formula} \textsl{index} \textsl{name})}.$$

% \subsection*{Assumption constants}
% \mylabel{S:Aconst}

An assumption constant appears in a proof as an axiom, a theorem or a
global assumption.  Its formula is given as an
\inquotes{uninstantiated formula}, where only type and predicate
variables can occur freely; these are considered to be bound in the
assumption constant.  In the proof the bound type variables are
implicitly instantiated by types, and the bound predicate variables by
comprehension terms (the arity of a comprehension term is the
type-instantiated arity of the corresponding predicate variable).
Since we do not have type and predicate quantification in formulas,
the assumption constant contains these parts left implicit in the
proof: \texttt{tsubst} and \texttt{pinst} (which will become a
\texttt{psubst}, once the arities of predicate variables are
type-instantiated with \texttt{tsubst}).

So we have assumption constants of the following kinds:
\begin{itemize}
\item axioms,
\item theorems, and
\item global assumptions.
\end{itemize}

To normalize a proof we will first translate it into a term, then
normalize the term and finally translate the normal term back into a
proof.  To make this work, in case of axioms we pass to the term
appropriate formulas: all-formulas for induction, an existential
formula for existence introduction, and an existential formula
together with a conclusion for existence elimination.  During
normalization of the term these formulas are passed along.  When the
normal form is reached, we have to translate back into a proof.  Then
these formulas are used to reconstruct the axiom in question.

Internally, the formula of an assumption constant is split into an
uninstantiated formula where only type and predicate variables can
occur freely, and a substitution for at most these type and predicate
variables.  The formula assumed by the constant is the result of
carrying out this substitution in the uninstantiated formula.  Note
that free variables may again occur in the assumed formula.
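Before turning to a concrete assumption constant, here is a minimal
sketch of the assumption-variable interface just described (assuming
the \texttt{nat} library is loaded):
\begin{verbatim}
(define u (formula-to-new-avar (pf "all n.n=n")))
(avar-to-formula u)                                ; the formula all n.n=n
(avar=? u u)                                       ; #t
(avar=? u (formula-to-new-avar (pf "all n.n=n")))  ; #f, a fresh index
\end{verbatim}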
For example, assumption constants axiomatizing the existential quantifier will internally have the form \begin{alignat*}{2} &\texttt{(aconst Ex-Intro $\forall \hat{x}^\alpha.\hat{P}(\hat{x}) \to \ex \hat{x}^\alpha \hat{P}(\hat{x})$ $(\alpha \mapsto \tau, \hat{P}^{(\alpha)} \mapsto \set{\hat{z}^\tau}{A})$)} \index{Ex-Intro@\texttt{Ex-Intro}} \\ &\texttt{(aconst Ex-Elim $\ex \hat{x}^\alpha \hat{P}(\hat{x}) \to (\forall \hat{x}^\alpha. \hat{P}(\hat{x}) \to \hat{Q}) \to \hat{Q}$} \\ &\qquad\qquad\qquad\qquad \texttt{$(\alpha \mapsto \tau, \hat{P}^{(\alpha)} \mapsto \set{\hat{z}^\tau}{A}, \hat{Q} \mapsto \set{}{B})$)} \index{Ex-Elim@\texttt{Ex-Elim}} \end{alignat*} \textbf{Interface for general assumption constants.} To avoid duplication of code it is useful to formulate some procedures generally for arbitrary assumption constants\index{assumption constant}, i.e.\ for all of the forms listed above. \begin{alignat*}{2} &\texttt{(make-aconst \textsl{name} \textsl{kind} \textsl{uninst-formula} \textsl{tpsubst}} \\ &\qquad \texttt{\textsl{repro-formula1} \dots)} \index{make-aconst@\texttt{make-aconst}} &\quad& \text{constructor} \\ &\texttt{(aconst-to-name \textsl{aconst})} \index{aconst-to-name@\texttt{aconst-to-name}} && \text{accessor} \\ &\texttt{(aconst-to-kind \textsl{aconst})} \index{aconst-to-kind@\texttt{aconst-to-kind}} && \text{accessor} \\ &\texttt{(aconst-to-uninst-formula \textsl{aconst})} \index{aconst-to-uninst-formula@\texttt{aconst-to-uninst-formula}} && \text{accessor} \\ &\texttt{(aconst-to-tpsubst \textsl{aconst})} \index{aconst-to-tpsubst@\texttt{aconst-to-tpsubst}} && \text{accessor} \\ &\texttt{(aconst-to-repro-formulas \textsl{aconst})} \index{aconst-to-repro-formulas@\texttt{aconst-to-repro-formulas}} && \text{accessor} \\ &\texttt{(aconst?\ \textsl{x})} \index{aconst?@\texttt{aconst?}} && \text{test.} \end{alignat*} To construct the formula associated with an aconst, it is useful to separate the instantiated formula from the variables to be generalized. The latter can be obtained as free variables in inst-formula. We therefore provide \begin{alignat*}{2} &\texttt{(aconst-to-inst-formula \textsl{aconst})} \index{aconst-to-inst-formula@\texttt{aconst-to-inst-formula}} \\ &\texttt{(aconst-to-formula \textsl{aconst})} \index{aconst-to-formula@\texttt{aconst-to-formula}} \end{alignat*} We also provide \begin{alignat*}{2} &\texttt{(aconst? \textsl{aconst})} \index{aconst?@\texttt{aconst?}} \\ &\texttt{(aconst=?\ \textsl{aconst1} \textsl{aconst2})} \index{aconst=?@\texttt{aconst=?}} \\ &\texttt{(aconst-without-rules?\ \textsl{aconst})} \index{aconst-without-rules?@\texttt{aconst-without-rules?}} \\ &\texttt{(aconst-to-string\ \textsl{aconst})} \index{aconst-to-string@\texttt{aconst-to-string}} \end{alignat*} \subsection{Axiom constants} \mylabel{SS:AxiomConst} % \paragraph{Axioms} % \mylabel{SS:Ax} We use the natural numbers as a prototypical finitary algebra; recall Figure~\ref{F:nat}. Assume that $n$, $p$ are variables of type $\nat$, $\boole$. Let $\Eq$ denote the equality relation in the model. Recall the domain of type $\boole$, consisting of $\true$, $\false$ and the bottom element $\bottom$. The boolean valued functions equality $=_{nat} \colon \nat \to \nat \to \boole$ and existence (definedness, totality) $e_{nat} \colon \nat \to \boole$ need to be continuous. 
So we have \begin{align*} \eqrel{0}{0} &\Eq \true \\ \eqrel{0}{S \hat{n}} \Eq \eqrel{S \hat{n}}{0} &\Eq \false &e(0) &\Eq \true \\ \eqrel{S \hat{n}_1}{S \hat{n}_2} &\Eq \eqrel{\hat{n}_1}{\hat{n}_2} &e(S \hat{n}) &\Eq e(\hat{n}) \\ \eqrel{\bottom_{nat}}{\hat{n}} \Eq \eqrel{\hat{n}}{\bottom_{nat}} &\Eq \bottom &e(\bottom_{\nat}) &\Eq \bottom \\ \eqrel{\infty_{nat}}{\hat{n}} \Eq \eqrel{\hat{n}}{\infty_{nat}} &\Eq \bottom &e(\infty_{\nat}) &\Eq \bottom \end{align*} Write $T$, $F$ for $\atom(\true)$, $\atom(\false)$, $r=s$ for $\atom(\eqrel{r}{s})$ and $E(r)$ for $\atom(e(r))$. We stipulate as axioms \begin{alignat*}{2} &T &\quad&\texttt{Truth-Axiom}\index{Truth-Axiom@\texttt{Truth-Axiom}} \\[1ex] &\hat{x} \Eq \hat{x} &\quad&\text{\texttt{Eq-Refl}\index{Eq-Refl@\texttt{Eq-Refl}}} \\ &\hat{x}_1 \Eq \hat{x}_2 \to \hat{x}_2 \Eq \hat{x}_1 &\quad&\text{\texttt{Eq-Symm}\index{Eq-Symm@\texttt{Eq-Symm}}} \\ &\hat{x}_1 \Eq \hat{x}_2 \to \hat{x}_2 \Eq \hat{x}_3 \to \hat{x}_1 \Eq \hat{x}_3 &\quad&\text{\texttt{Eq-Trans}\index{Eq-Trans@\texttt{Eq-Trans}}} \\[1ex] &\forall \hat{x} \hat{f}_1 \hat{x} \Eq \hat{f}_2 \hat{x} \to \hat{f}_1 \Eq \hat{f}_2 &&\text{\texttt{Eq-Ext}% \index{Extensionality@\texttt{Extensionality}}} \\ &\hat{x}_1 \Eq \hat{x}_2 \to \hat{P}(\hat{x}_1) \to \hat{P}(\hat{x}_2) &\quad&\text{\texttt{Eq-Compat}% \index{Compatibility@\texttt{Compatibility}}} % \\[1ex] % &\hat{n}_1 \Eq \hat{n}_2 \to E(\hat{n}_1) \to E(\hat{n}_2) \to % \hat{n}_1 = \hat{n}_2 % &&\text{\texttt{Eq-to-=}}\index{Eq-to-=@\texttt{Eq-to-=}} % \\ % &\hat{n}_1 = \hat{n}_2 \to \hat{n}_1 \Eq \hat{n}_2 % &&\text{\texttt{=-to-Eq}}\index{equals-to-Eq@\texttt{=-to-Eq}} % \\[1ex] % &\Total(\hat{n}) \to E(\hat{n}) % &&\text{\texttt{Total-to-E}}\index{Total-to-E@\texttt{Total-to-E}} % \\ % &E(\hat{n}) \to \Total(\hat{n}) % &&\text{\texttt{E-to-Total}}\index{E-to-Total@\texttt{E-to-Total}} % \\[1ex] % &c_1 \vec{\hat{x}}_1 \Eq c_2 \vec{\hat{x}}_2 \to F % &&\text{\texttt{Constr-Disjoint}}% % \index{Constr-Disjoint@\texttt{Constr-Disjoint@}} % \\ % &c \vec{\hat{x}}_1 \Eq c \vec{\hat{x}}_2 \to \hat{x}_{1i} \Eq \hat{x}_{2i} % &&\text{\texttt{Constr-Inj}}% % \index{Constr-Inj@\texttt{Constr-Inj@}} \\[1ex] &\Total_{\rho \to\sigma}(\hat{f}) \leftrightarrow \forall \hat{x}.\Total_{\rho}(\hat{x}) \to \Total_{\sigma}(\hat{f} \hat{x}) &&\text{\texttt{Total}}\index{Total@\texttt{Total}} \\ &\Total_{\rho}(c) &&\text{\texttt{Constr-Total}}\index{Constr-Total@\texttt{Constr-Total}} \\ &\Total(c \vec{\hat{x}}) \to \Total(\hat{x}_i) &&\text{\texttt{Constr-Total-Args}}% \index{Constr-Total-Args@\texttt{Constr-Total-Args}} % \\ % &\Total_{\rho}(\bottom) \to F % &&\text{\texttt{Bottom-not-Total}}% % \index{Bottom-Not-Total@\texttt{Bottom-Not-Total}} \\ \intertext{and for every finitary algebra, e.g.\ \texttt{nat}} &\hat{n}_1 \Eq \hat{n}_2 \to E(\hat{n}_1) \to \hat{n}_1 = \hat{n}_2 &&\text{\texttt{Eq-to-=-1-nat}\index{Eq-to-=-1-nat@\texttt{Eq-to-=-1-nat}}} \\ &\hat{n}_1 \Eq \hat{n}_2 \to E(\hat{n}_2) \to \hat{n}_1 = \hat{n}_2 &&\text{\texttt{Eq-to-=-2-nat}\index{Eq-to-=-2-nat@\texttt{Eq-to-=-2-nat}}} \\ &\hat{n}_1 = \hat{n}_2 \to \hat{n}_1 \Eq \hat{n}_2 &&\text{\texttt{=-to-Eq-nat}\index{equals-to-Eq-nat@\texttt{=-to-Eq-nat}}} \\ &\hat{n}_1 = \hat{n}_2 \to E(\hat{n}_1) &&\text{\texttt{=-to-E-1-nat}\index{equals-to-E-1-nat@\texttt{=-to-E-1-nat}}} \\ &\hat{n}_1 = \hat{n}_2 \to E(\hat{n}_2) &&\text{\texttt{=-to-E-2-nat}\index{equals-to-E-2-nat@\texttt{=-to-E-2-nat}}} \\ &\Total(\hat{n}) \to E(\hat{n}) 
&&\text{\texttt{Total-to-E-nat}\index{Total-to-E-nat@\texttt{Total-to-E-nat}}} \\ &E(\hat{n}) \to \Total(\hat{n}) &&\text{\texttt{E-to-Total-nat}\index{E-to-Total-nat@\texttt{E-to-Total-nat}}} \end{alignat*} Here $c$ is a constructor. Notice that in $\Total(c \vec{\hat{x}}) \to \Total(\hat{x}_i)$, the type of $(c \vec{\hat{x}})$ need not be a finitary algebra, and hence $\hat{x}_i$ may have a function type. % Further notice that $\Total_{\rho}(\bottom) \to F$ is also necessary % for $\rho$ an infinitary ground type. \begin{remark*} $(E(\hat{n}_1) \to \hat{n}_1 = \hat{n}_2) \to (E(\hat{n}_2) \to \hat{n}_1 = \hat{n}_2) \to \hat{n}_1 \Eq \hat{n}_2$ is \emph{not} valid in our intended model (see Figure~\ref{F:nat}), since we have \emph{two} distinct undefined objects $\bottom_{nat}$ and $\infty_{nat}$. \end{remark*} We abbreviate \begin{alignat*}{2} &\forall \hat{x}.\Total_{\rho}(\hat{x}) \to A &\quad\hbox{by}\quad& \forall x A,\\ &\exists \hat{x}.\Total_{\rho}(\hat{x}) \land A &\quad\hbox{by}\quad& \exists x A. \end{alignat*} Formally, these abbreviations appear as axioms \begin{alignat*}{2} &\forall x \hat{P}(x) \to \forall \hat{x}. \Total(\hat{x}) \to \hat{P}(\hat{x}) &\quad&\texttt{All-AllPartial}\index{All-AllPartial@\texttt{All-AllPartial}} \\ &(\forall \hat{x}. \Total(\hat{x}) \to \hat{P}(\hat{x})) \to \forall x \hat{P}(x) &\quad&\texttt{AllPartial-All}\index{AllPartial-All@\texttt{AllPartial-All}} \\ &\exists x \hat{P}(x) \to \exists \hat{x}. \Total(\hat{x}) \land \hat{P}(\hat{x}) &\quad&\texttt{Ex-ExPartial}\index{Ex-ExPartial@\texttt{Ex-ExPartial}} \\ &(\exists \hat{x}. \Total(\hat{x}) \land \hat{P}(\hat{x})) \to \exists x \hat{P}(x) &\quad&\texttt{ExPartial-Ex}\index{ExPartial-Ex@\texttt{ExPartial-Ex}} \\ \intertext{and for every finitary algebra, e.g.\ \texttt{nat}} &\forall n \hat{P}(n) \to \forall \hat{n}. E(\hat{n}) \to \hat{P}(\hat{n}) &\quad&\texttt{All-AllPartial-nat}% \index{All-AllPartial-nat@\texttt{All-AllPartial-nat}} \\ &(\exists \hat{n}. E(\hat{n}) \land \hat{P}(\hat{n})) \to \exists n \hat{P}(n) &\quad&\texttt{ExPartial-Ex-nat}% \index{ExPartial-Ex-nat@\texttt{ExPartial-Ex-nat}} \end{alignat*} Notice that \texttt{AllPartial-All-nat}% \index{AllPartial-All-nat@\texttt{AllPartial-All-nat},} i.e.\ $(\forall \hat{n}. E(\hat{n}) \to \hat{P}(\hat{n})) \to \forall n \hat{P}(n)$ is provable (since $E(n) \cnv T$). Similarly, \texttt{Ex-ExPartial-nat}% \index{Ex-ExPartial-nat@\texttt{Ex-ExPartial-nat}}, i.e.\ $\exists n \hat{P}(n) \to \exists \hat{n}. E(\hat{n}) \land \hat{P}(\hat{n})$ is provable. Finally we have axioms for the existential quantifier \begin{alignat*}{2} &\forall \hat{x}^\alpha.\hat{P}(\hat{x}) \to \ex \hat{x}^\alpha \hat{P}(\hat{x}) &\quad&\text{\texttt{Ex-Intro}\index{Ex-Intro@\texttt{Ex-Intro}}} \\ &\ex \hat{x}^\alpha \hat{P}(\hat{x}) \to (\forall \hat{x}^\alpha. 
\hat{P}(\hat{x}) \to \hat{Q}) \to \hat{Q} &\quad&\text{\texttt{Ex-Elim}\index{Ex-Elim@\texttt{Ex-Elim}}} \end{alignat*} The assumption constants corresponding to these axioms are \begin{alignat*}{2} &\texttt{truth-aconst}\index{truth-aconst@\texttt{truth-aconst}} &\quad&\text{for \texttt{Truth-Axiom}\index{Truth-Axiom@\texttt{Truth-Axiom}}} \\[1ex] &\texttt{eq-refl-aconst}\index{eq-refl-aconst@\texttt{eq-refl-aconst}} &\quad&\text{for \texttt{Eq-Refl}\index{Eq-Refl@\texttt{Eq-Refl}}} \\ &\texttt{eq-symm-aconst}\index{eq-symm-aconst@\texttt{eq-symm-aconst}} &\quad&\text{for \texttt{Eq-Symm}\index{Eq-Symm@\texttt{Eq-Symm}}} \\ &\texttt{eq-trans-aconst}\index{eq-trans-aconst@\texttt{eq-trans-aconst}} &\quad&\text{for \texttt{Eq-Trans}\index{Eq-Trans@\texttt{Eq-Trans}}} \\[1ex] &\texttt{ext-aconst}\index{ext-aconst@\texttt{ext-aconst}} &\quad&\text{for \texttt{Eq-Ext}\index{Eq-Ext@\texttt{Eq-Ext}}} \\ &\texttt{eq-compat-aconst}\index{eq-compat-aconst@\texttt{eq-compat-aconst}} &\quad&\text{for \texttt{Eq-Compat}\index{Eq-Compat@\texttt{Eq-Compat}}} \\ &\texttt{total-aconst}\index{total-aconst@\texttt{total-aconst}} &\quad&\text{for \texttt{Total}\index{Total@\texttt{Total}}} \\[1ex] &\texttt{(finalg-to-eq-to-=-1-aconst finalg)}% \index{finalg-to-eq-to-=-1-aconst@\texttt{finalg-to-eq-to-=-1-aconst}} &\quad&\text{for \texttt{Eq-to-=-1}\index{Eq-to-=-1@\texttt{Eq-to-=-1}}} \\ &\texttt{(finalg-to-eq-to-=-2-aconst finalg)}% \index{finalg-to-eq-to-=-2-aconst@\texttt{finalg-to-eq-to-=-2-aconst}} &\quad&\text{for \texttt{Eq-to-=-2}\index{Eq-to-=-2@\texttt{Eq-to-=-2}}} \\ &\texttt{(finalg-to-=-to-eq-aconst finalg)}% \index{finalg-to-=-to-eq-aconst@\texttt{finalg-to-=-to-eq-aconst}} &\quad&\text{for \texttt{=-to-Eq}\index{=-to-Eq@\texttt{=-to-Eq}}} \\ &\texttt{(finalg-to-=-to-e-1-aconst finalg)}% \index{finalg-to-=-to-e-1-aconst@\texttt{finalg-to-=-to-e-1-aconst}} &\quad&\text{for \texttt{=-to-E-1}\index{=-to-E-1@\texttt{=-to-E-1}}} \\ &\texttt{(finalg-to-=-to-e-2-aconst finalg)}% \index{finalg-to-=-to-e-2-aconst@\texttt{finalg-to-=-to-e-2-aconst}} &\quad&\text{for \texttt{=-to-E-2}\index{=-to-E-2@\texttt{=-to-E-2}}} \\ &\texttt{(finalg-to-total-to-e-aconst finalg)}% \index{finalg-to-total-to-e-aconst@\texttt{finalg-to-total-to-e-aconst}} &\quad&\text{for \texttt{Total-to-E}\index{Total-to-E@\texttt{Total-to-E}}} \\ &\texttt{(finalg-to-e-to-total-aconst finalg)}% \index{finalg-to-e-to-total-aconst@\texttt{finalg-to-e-to-total-aconst}} &\quad&\text{for \texttt{E-to-Total}\index{E-to-Total@\texttt{E-to-Total}}} \\[1ex] &\texttt{all-allpartial-aconst}% \index{all-allpartial-aconst@\texttt{all-allpartial-aconst}} &\quad&\text{for \texttt{All-AllPartial}% \index{All-AllPartial@\texttt{All-AllPartial}}} \\ &\texttt{allpartial-all-aconst}% \index{allpartial-all-aconst@\texttt{allpartial-all-aconst}} &\quad&\text{for \texttt{AllPartial-All}% \index{AllPartial-All@\texttt{AllPartial-All}}} \\ &\texttt{ex-expartial-aconst}% \index{ex-expartial-aconst@\texttt{ex-expartial-aconst}} &\quad&\text{for \texttt{Ex-ExPartial}% \index{Ex-ExPartial@\texttt{Ex-ExPartial}}} \\ &\texttt{expartial-ex-aconst}% \index{expartial-ex-aconst@\texttt{expartial-ex-aconst}} &\quad&\text{for \texttt{ExPartial-Ex}% \index{ExPartial-Ex@\texttt{ExPartial-Ex}}} \\[1ex] &\texttt{(finalg-to-all-allpartial-aconst finalg)}% \index{finalg-to-all-allpartial-aconst@\texttt{finalg-to-all-allpartial-aconst}} &\quad&\text{for \texttt{All-AllPartial}% \index{All-AllPartial@\texttt{All-AllPartial}}} \\ &\texttt{(finalg-to-expartial-ex-aconst finalg)}% 
\index{finalg-to-expartial-ex-aconst@\texttt{finalg-to-expartial-ex-aconst}} &\quad&\text{for \texttt{ExPartial-Ex}% \index{ExPartial-Ex@\texttt{ExPartial-Ex}}} \end{alignat*} % \paragraph{Induction axioms for simultaneous free algebras} % \mylabel{SS:IndSFA} We now spell out what precisely we mean by induction\index{induction} over simultaneous free algebras $\vec{\mu} = \mu\vec{\alpha}\,\vec{\kappa}$, with goal formulas $\forall x_j^{\mu_j}\, \hat{P}_j(x_j)$. For the constructor type \[ \kappa_i = \vec{\rho} \to (\vec{\sigma}_1 \to \alpha_{j_1}) \to \dots \to (\vec{\sigma}_n \to \alpha_{j_n}) \to \alpha_j \in \constrtypes(\vec{\alpha}) \] we have the \emph{step formula} \begin{align*} D_i := \forall y_1^{\rho_1},\dots,y_m^{\rho_m}, y_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}},\dots, y_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}. &\forall \vec{x}^{\vec{\sigma}_1}\, \hat{P}_{j_1}(y_{m+1}\vec{x}) \to \dots \to \\ &\forall \vec{x}^{\vec{\sigma}_n}\, \hat{P}_{j_n}(y_{m+n}\vec{x}) \to \\ &\hat{P}_j(\constr_i^{\vec{\mu}}(\vec{y})). \end{align*} Here $\vec{y} = y_1^{\rho_1},\dots,y_m^{\rho_m}, y_{m+1}^{\vec{\sigma}_1 \to \mu_{j_1}},\dots, y_{m+n}^{\vec{\sigma}_n \to \mu_{j_n}}$ are the \emph{components} of the object $\constr_i^{\vec{\mu}}(\vec{y})$ of type $\mu_j$ under consideration, and \[ \forall \vec{x}^{\vec{\sigma}_1}\, \hat{P}_{j_1}(y_{m+1}\vec{x}), \dots, \forall \vec{x}^{\vec{\sigma}_n}\, \hat{P}_{j_n}(y_{m+n}\vec{x}) \] are the hypotheses available when proving the induction step. The induction axiom $\ind_{\mu_j}$\index{Ind@\texttt{Ind}} then proves the formula \[ \ind_{\mu_j} \colon D_1 \to \dots \to D_k \to \forall x_j^{\mu_j}\, \hat{P}_j(x_j). \] We will often write $\ind_j$ for $\ind_{\mu_j}$. An example is \begin{alignat*}{2} &E_1 \to E_2 \to E_3 \to E_4 \to \forall x_1^\tree \hat{P}_1(x_1) \\ \intertext{with} &E_1 := \hat{P}_1(\leaf), \\ &E_2 := \forall x^{\tlist}.\hat{P}_2(x) \to \hat{P}_1(\branch(x)), \\ &E_3 := \hat{P}_2(\empt), \\ &E_4 := \forall x_1^{\tree},x_2^{\tlist}. \hat{P}_1(x_1) \to \hat{P}_2(x_2) \to \hat{P}_2(\tcons(x_1,x_2)). \end{alignat*} Here the fact that we deal with a simultaneous induction (over \texttt{tree} and \texttt{tlist}), and that we prove a formula of the form $\forall x^\tree \dots$, can all be inferred from what is given: the $\forall x^\tree \dots$ is right there, and for \texttt{tlist} we can look up the simultaneously defined algebras. -- The internal representation is \begin{alignat*}{2} &\texttt{(aconst Ind $E_1 \to E_2 \to E_3 \to E_4 \to \forall x_1^\tree \hat{P}_1(x_1)$} \\ &\qquad \qquad \qquad \texttt{$(\hat{P}_1 \mapsto \set{x_1^\tree}{A_1}, \hat{P}_2 \mapsto \set{x_2^\tlist}{A_2})$)} \index{Ind@\texttt{Ind}} \end{alignat*} A simplified version (without the recursive calls) of the induction axiom is the following cases axiom. \begin{alignat*}{2} &\texttt{(aconst Cases $E_1 \to E_2 \to \forall x_1^\tree \hat{P}_1(x_1)$ $(\hat{P}_1 \mapsto \set{x_1^\tree}{A_1})$)} \index{Cases@\texttt{Cases}} \\ \intertext{with} &E_1 := \hat{P}_1(\leaf), \\ &E_2 := \forall x^{\tlist} \hat{P}_1(\branch(x)). \end{alignat*} However, rather than using this as an assumption constant we will -- parallel to the \texttt{if}-construct\index{if-construct@\texttt{if}-construct} for terms -- use a \texttt{cases}-construct\index{cases-construct@\texttt{cases}-construct} for proofs. This does not change our notion of proof; it is done to have the \texttt{if}-construct in the extracted programs. 
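For instance, for the algebra $\nat$ with constructors $0$ and $S$
(one constructor type without arguments and one with a single
recursive argument) the induction schema above specializes to the
familiar axiom
\[
\ind_{\nat} \colon \hat{P}(0) \to
(\forall n.\hat{P}(n) \to \hat{P}(S n)) \to \forall n\, \hat{P}(n).
\]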
The assumption constants corresponding to these axioms are generated by \begin{alignat*}{2} &\texttt{(all-formulas-to-ind-aconst \textsl{all-formula1} \dots)} \index{all-formulas-to-ind-aconst@\texttt{all-formulas-to-ind-aconst}} &\quad&\text{for \texttt{Ind}\index{Ind@\texttt{Ind}}} \\ &\texttt{(all-formula-to-cases-aconst \textsl{all-formula})} \index{all-formula-to-cases-aconst@\texttt{all-formula-to-cases-aconst}} &\quad&\text{for \texttt{Cases}\index{Cases@\texttt{Cases}}} \end{alignat*} % To deal with equality we need % \begin{alignat*}{2} % % % &\texttt{(refl-at \textsl{finalg})} % \index{refl-at@\texttt{refl-at}} % &\quad&\colon x=x\\ % % % &\texttt{(=-ax-at \textsl{finalg})} % \index{equal-ax-at@\texttt{=-ax-at}} % &&\colon x_1 = x_2 \to \hat{P} x_1 \to \hat{P} x_2. % % % \end{alignat*} For the introduction and elimination axioms \texttt{Ex-Intro}\index{Ex-Intro@\texttt{Ex-Intro}} and \texttt{Ex-Elim}\index{Ex-Elim@\texttt{Ex-Elim}} for the existential quantifier we provide \begin{align*} &\texttt{(ex-formula-to-ex-intro-aconst ex-formula)}% \index{ex-formula-to-ex-intro-aconst@\texttt{ex-formula-to-ex-intro-aconst}} \\ &\texttt{(ex-formula-and-concl-to-ex-elim-aconst ex-formula concl)}% \index{ex-formula-and-concl-to-ex-elim-const@\texttt{ex-for{\dots}-to-ex-elim-const}} \end{align*} and similarly for \texttt{exnc} instead of \texttt{ex}. To deal with inductively defined predicate constants, we need additional axioms with names \inquotes{Intro}\index{Intro} and \inquotes{Elim}\index{Elim}, which can be generated by \begin{align*} &\texttt{(number-and-idpredconst-to-intro-aconst i idpc)}% \index{number-and-idpredconst-to-intro-aconst@\texttt{number-and-idpredconst-to-intro-aconst}} \\ &\texttt{(imp-formulas-to-elim-aconst imp-formula1\ \dots)};% \index{imp-formulas-to-elim-aconst@\texttt{imp-formulas-to-elim-aconst}} \end{align*} here an \texttt{imp-formula} is expected to have the form $I(\vec{x}) \to A$. \subsection{Theorems} \mylabel{SS:Theorems} A theorem is a special assumption constant. % A typical example is % the transitivity of the successor function, with the internal % representation % \begin{alignat*}{2} % % % &\texttt{(aconst Trans-Suc $\forall k,m,n. k